diff --git a/spaces/0xSynapse/Image_captioner/app.py b/spaces/0xSynapse/Image_captioner/app.py
deleted file mode 100644
index 576dcbf9b1147ac748e08fb031d5e2b821434c29..0000000000000000000000000000000000000000
--- a/spaces/0xSynapse/Image_captioner/app.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#imported all required libraries
-import streamlit as st
-import torch
-import requests
-from PIL import Image
-from io import BytesIO
-from transformers import ViTFeatureExtractor, AutoTokenizer, VisionEncoderDecoderModel
-
-
-#used a pretrained model hosted on huggingface
-loc = "ydshieh/vit-gpt2-coco-en"
-
-feature_extractor = ViTFeatureExtractor.from_pretrained(loc)
-tokenizer = AutoTokenizer.from_pretrained(loc)
-model = VisionEncoderDecoderModel.from_pretrained(loc)
-model.eval()
-
-#defined a function for prediction
-
-def predict(image):
-    pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values
-
-    with torch.no_grad():
-        output_ids = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True).sequences
-
-    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
-    preds = [pred.strip() for pred in preds]
-
-    return preds
-
-#defined a function for Streamlit App
-def app():
-    st.title("ImaginateAI")
-    st.write("ViT and GPT2 are used to generate Image Caption for the uploaded image. COCO Dataset was used for training. This image captioning model might have some biases that I couldn’t figure during testing")
-    st.write("Upload an image or paste a URL to get predicted captions.")
-
-    upload_option = st.selectbox("Choose an option:", ("Upload Image", "Paste URL"))
-
-    if upload_option == "Upload Image":
-        uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg"])
-
-        if uploaded_file is not None:
-            image = Image.open(uploaded_file)
-            preds = predict(image)
-            st.image(image, caption="Uploaded Image", use_column_width=True)
-            st.write("Predicted Caption:", preds)
-
-
-    elif upload_option == "Paste URL":
-        image_url = st.text_input("Enter Image URL")
-        if st.button("Submit") and image_url:
-            try:
-                response = requests.get(image_url, stream=True)
-                image = Image.open(BytesIO(response.content))
-                preds = predict(image)
-                st.image(image, caption="Image from URL", use_column_width=True)
-                st.write("Predicted Caption:", preds)
-            except:
-                st.write("Error: Invalid URL or unable to fetch image.")
-
-if __name__ == "__main__":
-    app()
\ No newline at end of file
diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/__init__.py
deleted file mode 100644
index a2fc6f5af4a85304b0e23ceb07bfe844fc907f23..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/__init__.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import requests
-import json
-import hashlib
-import random
-import string
-from fake_useragent import UserAgent
-
-class ChatCompletion:
-    @classmethod
-    def md5(self, text):
-        return hashlib.md5(text.encode()).hexdigest()[::-1]
-
-    @classmethod
-    def get_api_key(self, user_agent):
-        part1 = str(random.randint(0, 10**11))
-        part2 = self.md5(user_agent+self.md5(user_agent+self.md5(user_agent+part1+"x")))
-        return f"tryit-{part1}-{part2}"
-
-    @classmethod
-    def create(self, messages):
-        user_agent = UserAgent().random
-        api_key = self.get_api_key(user_agent)
-        headers = {
-            "api-key": api_key,
-            "user-agent": user_agent
-        }
-        files = {
-            "chat_style": (None, "chat"),
-            "chatHistory": (None, json.dumps(messages))
-        }
-
-        r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
-
-        for chunk in r.iter_content(chunk_size=None):
-            r.raise_for_status()
-            yield chunk.decode()
-
-class Completion:
-    @classmethod
-    def create(self, prompt):
-        return ChatCompletion.create([
-            {
-                "role": "user",
-                "content": prompt
-            }
-        ])
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Golmaal Again 1080p Hd Hindi Full Fix Movie.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Golmaal Again 1080p Hd Hindi Full Fix Movie.md
deleted file mode 100644
index 529e34e1a0241826de5c5af6a1ff94075ae8a119..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Golmaal Again 1080p Hd Hindi Full Fix Movie.md
+++ /dev/null
@@ -1,15 +0,0 @@
-

Golmaal Again: A Hilarious and Spooky Comedy

-

Golmaal Again is a 2017 Hindi comedy film directed by Rohit Shetty and starring Ajay Devgn, Parineeti Chopra, Tabu, Arshad Warsi, Tusshar Kapoor, Shreyas Talpade, Kunal Khemu, Prakash Raj and Neil Nitin Mukesh. It is the fourth installment of the Golmaal film series and a sequel to Golmaal 3 (2010).

-

The film follows the adventures of Gopal, Madhav, Lucky, Laxman 1 and Laxman 2, five friends who grew up in an orphanage and reunite after many years. They discover that their childhood friend Khushi, who they thought was dead, is actually alive and has some supernatural powers. They also encounter a ghost named Anna Mathew, who reveals some shocking secrets about their past.

-

Golmaal Again 1080p hd hindi full movie


DOWNLOADhttps://byltly.com/2uKwWm



-

Golmaal Again is a blend of comedy, horror and action that will keep you entertained throughout. The film has many hilarious scenes, such as the one where Gopal gets scared by a snake or the one where Laxman 2 mimics a lion. The film also has some emotional moments, such as the one where Khushi reunites with her long-lost father or the one where Gopal confesses his love for Khushi.

-

Golmaal Again is a blockbuster hit that grossed over ₹300 crore worldwide and became one of the highest-grossing Indian films of all time. The film received positive reviews from critics and audiences alike, who praised its humor, performances, music and direction. The film also won several awards, including the Filmfare Award for Best Actor in a Comic Role for Ajay Devgn.

-

If you are looking for a fun-filled and spooky movie to watch with your family or friends, Golmaal Again is the perfect choice for you. You can watch Golmaal Again in full HD quality on Disney+ Hotstar[^1^] [^2^] or Bilibili[^3^]. Don't miss this laughter riot that will make you go "Golmaal Golmaal"!

- -

Golmaal Again is the fourth film in the Golmaal series, which started with Golmaal: Fun Unlimited (2006), followed by Golmaal Returns (2008) and Golmaal 3 (2010). The series is known for its slapstick comedy, quirky characters and catchy songs. The films are loosely inspired by the Hollywood comedy franchise Police Academy.

-

The film features a star-studded cast of actors who have worked together in previous Golmaal films. Ajay Devgn plays Gopal, the leader of the gang who is afraid of ghosts and loves Khushi. Parineeti Chopra plays Khushi, the bubbly and innocent girl who has a special connection with Gopal. Tabu plays Anna Mathew, the librarian and ghost whisperer who helps the gang solve the mystery. Arshad Warsi plays Madhav, the prankster who often clashes with Gopal. Tusshar Kapoor plays Lucky, the mute and funny member of the gang. Shreyas Talpade plays Laxman 1, the stammering and loyal friend of Gopal. Kunal Khemu plays Laxman 2, the smart and witty brother of Laxman 1. Prakash Raj plays Sheru Bhai, the don of Ooty who has a grudge against the gang. Neil Nitin Mukesh plays Nikhil, the son of Sheru Bhai who falls in love with Khushi.

-

The film also has some special appearances by actors such as Sanjay Mishra, Johnny Lever, Mukesh Tiwari, Vrajesh Hirjee, Murali Sharma and Sachin Khedekar. The film has a cameo by Nana Patekar, who lends his voice to the ghost of Jamnadas, the owner of the orphanage where the gang grew up. The film also features a song by Ranveer Singh, who dances with the gang in the end credits.

-

81aa517590
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Calculus By Howard Anton 8th Edition Free !FULL! Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Calculus By Howard Anton 8th Edition Free !FULL! Download.md deleted file mode 100644 index 6f0112c7188320bf5808e1ed1582c70bc9bee8c0..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Calculus By Howard Anton 8th Edition Free !FULL! Download.md +++ /dev/null @@ -1,6 +0,0 @@ -

Calculus By Howard Anton 8th Edition Free Download


Download ••• https://imgfil.com/2uy0oS



-
-Howard Anton's Handbook of Exercise Calculus, 8th edition. Jun 05, 2015. While we try to keep our promises, we must not forget that we still have to keep our promises. We must remember that not all of our promises are true and that we must be careful when we decide what we promise. This is especially true in the financial world. Our financial promises are not perfect, and they don't have to be. The financial promises don't change. 8a78ff9644
-
-
-

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Design My Home Makeover Games APK - A Fun and Relaxing Word Game with Home Decoration.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Design My Home Makeover Games APK - A Fun and Relaxing Word Game with Home Decoration.md deleted file mode 100644 index f909d70498426aa1188480b3fc18d6a95fadd871..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Design My Home Makeover Games APK - A Fun and Relaxing Word Game with Home Decoration.md +++ /dev/null @@ -1,96 +0,0 @@ - -

Design My Home: Makeover Games APK - A Fun and Creative Casual Game

-

Do you love home design and word games? If yes, then you will love Design My Home: Makeover Games APK, a casual game that combines both genres in a fun and creative way. In this game, you can design your dream home, solve word puzzles, and compete with other players around the world. Here is everything you need to know about this game, including how to download and install it, what are its features, and what are some tips and tricks for playing it.

-

What is Design My Home: Makeover Games?

-

Design My Home: Makeover Games is a casual game developed by Holy Cow Studio. The APK has been available since May 2020 and has been downloaded more than a million times. Although it is currently not in the top ranks on Google Play, it is rated 4.59 out of 5 stars based on 22,537 ratings. The game has three main aspects:

-

design my home makeover games apk


Downloadhttps://urlin.us/2uT2qm



-

A casual game that lets you design your dream home

-

In this game, you can unleash your inner designer and create beautiful rooms according to your taste and style. You can choose from hundreds of levels and rooms, such as living rooms, bedrooms, kitchens, bathrooms, gardens, and more. You can also customize every detail, such as the walls, floors, furniture, decor, lighting, plants, and accessories. You can mix and match different styles, colors, patterns, and textures to create your own unique designs.

-

A word game that challenges your vocabulary and creativity

-

To design each room, you need to solve word puzzles that are related to the theme of the room. For example, if you are designing a kitchen, you need to find words that are associated with cooking, food, utensils, appliances, etc. The word puzzles are in the form of crosswords or word searches. You need to swipe the letters on the screen to form words. You can use hints or shuffle the letters if you are stuck. The more words you find, the more coins you earn.

-

A social game that lets you share your designs and compete with others

-

You can also share your designs with other players and see their ratings and comments. You can also rate and comment on other players' designs and get inspired by their ideas. You can also join clubs and chat with other designers. You can also participate in tournaments and events where you can compete with other players for prizes and glory.

-

How to download and install Design My Home: Makeover Games APK?

-

If you want to play Design My Home: Makeover Games APK on your Android device, you need to follow these steps:

-

Download the APK file from a trusted source

-

You can download the APK file from [AppBrain](^1^), a reliable website that offers free APK downloads for Android apps. You can also scan the QR code on the website to download the file directly to your device. The APK file size is 99 MB and the latest version is 1.2.9.

-

Enable unknown sources in your device settings

-

Before you can install the APK file, you need to enable unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Tap OK to proceed.

-

Install the APK file and enjoy the game

-

Once you have downloaded and enabled unknown sources, you can install the APK file by tapping on it. You may see a prompt that asks you to confirm the installation. Tap Install and wait for the process to finish. After the installation is complete, you can open the game and start designing your home.

-

What are the features of Design My Home: Makeover Games APK?

-

Design My Home: Makeover Games APK has many features that make it a fun and creative casual game. Here are some of them:

-

design my home makeover word life apk
-design my home makeover games offline apk
-design my home makeover games mod apk
-design my home makeover games free download apk
-design my home makeover games for android apk
-design my home makeover games latest version apk
-design my home makeover games hack apk
-design my home makeover games unlimited money apk
-design my home makeover games 3d apk
-design my home makeover games online apk
-design my home makeover games with friends apk
-design my home makeover games no ads apk
-design my home makeover games premium apk
-design my home makeover games pro apk
-design my home makeover games full apk
-design my home makeover games fun apk
-design my home makeover games best apk
-design my home makeover games new apk
-design my home makeover games update apk
-design my home makeover games puzzle apk
-design my home makeover games simulation apk
-design my home makeover games adventure apk
-design my home makeover games casual apk
-design my home makeover games creative apk
-design my home makeover games realistic apk
-design my home makeover games easy apk
-design my home makeover games challenging apk
-design my home makeover games relaxing apk
-design my home makeover games addictive apk
-design my home makeover games educational apk
-design my home makeover games family apk
-design my home makeover games kids apk
-design my home makeover games adults apk
-design my home makeover games girls apk
-design my home makeover games boys apk
-design my home makeover games cute apk
-design my home makeover games beautiful apk
-design my home makeover games awesome apk
-design my home makeover games amazing apk
-design my home makeover games cool apk

-

Hundreds of levels and rooms to design

-

The game has hundreds of levels and rooms that you can design according to your preferences. You can start with simple rooms like bedrooms and living rooms, and progress to more complex ones like kitchens and gardens. You can also unlock special rooms like studios, spas, cinemas, and more. Each room has a different theme and requirement that you need to follow. For example, some rooms may require you to use a certain style or color scheme, while others may give you more freedom to express yourself.

-

Thousands of furniture and decor items to choose from

-

The game has thousands of furniture and decor items that you can use to decorate your rooms. You can choose from different categories, such as sofas, beds, tables, chairs, lamps, rugs, paintings, plants, and more. You can also filter the items by style, color, price, or rating. You can also preview the items before you buy them with your coins. You can also rotate, resize, or move the items to fit your design.

-

Daily rewards and bonuses to boost your progress

-

The game also gives you daily rewards and bonuses that can help you progress faster in the game. You can get free coins, hints, shuffles, stars, or items every day by logging in, watching ads, or completing tasks. You can also get extra rewards by spinning the wheel of fortune or opening the mystery box. You can use these rewards to buy more items, solve more puzzles, or unlock more rooms.

-

Offline mode and cloud save support

-

The game also supports offline mode and cloud save features. This means that you can play the game without an internet connection and your progress will be saved automatically. You can also sync your progress across different devices by logging in with your Facebook account. This way, you can enjoy the game anytime and anywhere.

-

What are the tips and tricks for playing Design My Home: Makeover Games APK?

-

If you want to master Design My Home: Makeover Games APK, here are some tips and tricks that you can follow:

-

Use hints and shuffle when you are stuck on a word puzzle

-

Sometimes, you may find it hard to solve a word puzzle because you don't know the word or you can't see it on the screen. In that case, you can use hints or shuffle to help you out. Hints will reveal one letter of the word for you, while shuffle will rearrange the letters on the screen. You can get hints or shuffle by spending coins or watching ads.

-

Follow the design themes and requirements for each room

-

Each room in the game has a specific theme and requirement that you need to follow in order to complete it. For example, some rooms may require you to use a certain style or color scheme, while others may give you more freedom to express yourself. You can see the theme and requirement at the top of the screen when you enter a room. You can also see how many stars you need to earn in order to finish the room.

-

Collect stars and coins to unlock new items and rooms

-

Stars and coins are the main currencies in the game that you need to collect in order to unlock new items and rooms. You can earn stars by completing word puzzles and designing rooms according to their themes and requirements. You can earn coins by finding words, watching ads, spinning the wheel of fortune, opening the mystery box, or completing tasks. You can also buy coins with real money if you want.

-

Watch ads and complete tasks to earn extra rewards

-

Another way to earn more stars and coins in the game is to watch ads and complete tasks. You can watch ads to get free hints, shuffles, coins, or items. You can also complete tasks that are given to you by the game or by other players. These tasks may involve designing a specific room, finding a certain word, or rating other players' designs. You can get rewards such as coins, stars, items, or badges for completing these tasks.

-

Conclusion

-

Design My Home: Makeover Games APK is a fun and creative casual game that lets you design your dream home, solve word puzzles, and compete with other players. You can download and install the APK file from a trusted source and enjoy the game on your Android device. You can also explore the features of the game, such as hundreds of levels and rooms, thousands of furniture and decor items, daily rewards and bonuses, offline mode and cloud save support, and more. You can also follow some tips and tricks to master the game, such as using hints and shuffle, following the design themes and requirements, collecting stars and coins, watching ads and completing tasks, and more. If you love home design and word games, you should give Design My Home: Makeover Games APK a try.

-

FAQs

-

Here are some frequently asked questions about Design My Home: Makeover Games APK:

-

Q: Is Design My Home: Makeover Games APK safe to download and install?

-

A: Yes, Design My Home: Makeover Games APK is safe to download and install as long as you get it from a trusted source like AppBrain. You should also enable unknown sources on your device settings before installing the APK file.

-

Q: How can I update Design My Home: Makeover Games APK?

-

A: You can update Design My Home: Makeover Games APK by downloading the latest version of the APK file from AppBrain or by checking for updates within the game. You should always update the game to enjoy the latest features and bug fixes.

-

Q: How can I contact the developer of Design My Home: Makeover Games APK?

-

A: You can contact the developer of Design My Home: Makeover Games APK by sending an email to support@holycow.studio or by visiting their website at https://holycow.studio/. You can also follow them on Facebook at https://www.facebook.com/holycowstudio/ or on Instagram at https://www.instagram.com/holycowstudio/.

-

Q: How can I delete Design My Home: Makeover Games APK from my device?

-

A: You can delete Design My Home: Makeover Games APK from your device by going to Settings > Apps > Design My Home > Uninstall. You can also delete the APK file from your device storage if you don't need it anymore.

-

Q: Can I play Design My Home: Makeover Games APK on my PC or laptop?

-

A: Yes, you can play Design My Home: Makeover Games APK on your PC or laptop by using an Android emulator like BlueStacks or NoxPlayer. These emulators will allow you to run Android apps on your PC or laptop. However, you may experience some performance issues or compatibility problems depending on your device specifications.

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bloons TD 6 on PC and Experience the Most Epic Tower Defense Game.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bloons TD 6 on PC and Experience the Most Epic Tower Defense Game.md deleted file mode 100644 index 0be8409207ba124514923c8e14d5f51b3678dcc7..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bloons TD 6 on PC and Experience the Most Epic Tower Defense Game.md +++ /dev/null @@ -1,152 +0,0 @@ -
-

Bloons TD 6: A Fun and Challenging Tower Defense Game

-

If you are a fan of tower defense games, you might have heard of Bloons TD 6, the latest installment in the Bloons Tower Defense series by Ninja Kiwi. Bloons TD 6 is a strategy game that challenges you to pop all the invading balloons (or bloons) before they reach the end of the track. You can use a variety of monkey towers, heroes, and powers to stop them. But be careful, as the bloons come in different shapes, sizes, and abilities, and some of them are very tough to pop.

-

bloons td 6 free download pc


Downloadhttps://urlin.us/2uT1rc



-

In this article, we will give you an overview of what Bloons TD 6 is, how to download and play it on your PC, and how to master it with some useful tips and tricks. Let's get started!

-

What is Bloons TD 6?

-

Bloons TD 6 is a strategy-based video game developed and published by Ninja Kiwi. It was released on June 13, 2018 for Android and iOS, and later brought to Steam for Windows and Macintosh. It is the sixth main game in the Bloons Tower Defense series, which started in 2007 as a web browser game.

-

Bloons TD 6 follows the same tower defense formula as its predecessors, but it also introduces some new features and improvements that make it more fun and challenging. Here are some of the main aspects of the game:

-

The basic gameplay of Bloons TD 6

-

The core gameplay of Bloons TD 6 is simple: you have to prevent the bloons from reaching the end of the track by placing monkey towers along the way. Each monkey tower has a different attack range, rate, damage, and cost. You can also upgrade your towers to make them more powerful or give them special abilities.

-

There are four classes of monkey towers: Primary, Military, Magic, and Support. Each class has its own strengths and weaknesses against different types of bloons. For example, Primary towers are good at popping regular bloons, but they struggle against lead or camo bloons. Military towers are good at popping camo bloons, but they are weak against purple bloons. Magic towers can pop almost any bloon, but they are expensive and have low pierce. Support towers do not attack directly, but they provide buffs or debuffs to other towers or bloons.

-

bloons td 6 pc download full version free
-how to get bloons td 6 for free on pc
-bloons td 6 free download windows 10
-bloons td 6 pc game free download
-bloons tower defense 6 free download pc
-bloons td 6 online free no download pc
-bloons td 6 steam free download pc
-bloons td 6 apk free download pc
-bloons td 6 cracked download pc free
-bloons td 6 mod free download pc
-bloons td 6 emulator free download pc
-bloons td 6 bluestacks free download pc
-bloons td 6 play online for free on pc
-bloons td 6 strategy game free download pc
-bloons td 6 latest version free download pc
-bloons td 6 update free download pc
-bloons td 6 multiplayer free download pc
-bloons td 6 co op free download pc
-bloons td 6 sandbox mode free download pc
-bloons td 6 cheats free download pc
-bloons td 6 hack free download pc
-bloons td 6 trainer free download pc
-bloons td 6 save file free download pc
-bloons td 6 custom maps free download pc
-bloons td 6 editor free download pc
-bloons td 6 best towers free download pc
-bloons td 6 tier list free download pc
-bloons td 6 heroes guide free download pc
-bloons td 6 tips and tricks free download pc
-bloons td 6 walkthrough free download pc
-bloons td 6 achievements free download pc
-bloons td 6 challenges free download pc
-bloons td 6 daily rewards free download pc
-bloons td 6 monkey knowledge free download pc
-bloons td 6 insta monkeys free download pc
-bloons td 6 skins free download pc
-bloons td 6 soundtrack free download pc
-bloons td 6 wallpaper free download pc
-bloons td 6 review free download pc
-bloons td 6 reddit free download pc
-bloons td 6 discord server free download pc
-bloons td 6 wiki free download pc
-bloons td 6 official website free download pc
-bloons td 6 system requirements free download pc
-how to install bloons td 6 for free on pc
-how to play bloons td 6 offline for free on pc
-how to transfer bloons td 6 data for free on pc
-how to unlock all monkeys in bloons td 6 for free on pc
-how to get unlimited money in bloons td 6 for free on pc

-

You can choose from several game modes and difficulty levels to suit your preference and skill level. The game modes include Standard (the normal mode), Impoppable (the hardest mode), CHIMPS (a mode where you cannot use Continues, Hearts Lost, Income, Monkey Knowledge, Powers or Selling), Sandbox (a mode where you can test your strategies with unlimited money and lives), Races (a mode where you compete with other players to finish a map as fast as possible), Co-Op (a mode where you can team up with up to three other players), Odysseys (a mode where you have to complete a series of maps with limited tower choices), Boss Events (a mode where you have to face powerful boss bloons), Contested Territory (a 2Ghz or better | | Memory: 4096 MB RAM | Memory: 8192 MB RAM | | Graphics: OpenGL 2.0 compatible, ATI, Nvidia or Intel HD | Graphics: OpenGL 2.0 compatible, ATI, Nvidia or Intel HD | | Storage: 2048 MB available space | Storage: 4096 MB available space | | Sound Card: Windows compatible sound card | Sound Card: Windows compatible sound card |

The steps to download and install Bloons TD 6 on PC

-

Depending on your preference, you can choose one of the following methods to download and install Bloons TD 6 on your PC:

-

Using BlueStacks emulator

-

BlueStacks is a popular Android emulator that allows you to run Android apps and games on your PC. You can use BlueStacks to play Bloons TD 6 on your PC with the same features and performance as on your mobile device. Here are the steps to do this:

-
    -
  1. Download and install BlueStacks from its official website: https://www.bluestacks.com/
  2. -
  3. Launch BlueStacks and sign in with your Google account.
  4. -
  5. Go to the Google Play Store app and search for Bloons TD 6.
  6. -
  7. Click on the Install button and wait for the download and installation to complete.
  8. -
  9. Go to the My Apps tab and click on the Bloons TD 6 icon to launch the game.
  10. -
  11. Enjoy playing Bloons TD 6 on your PC with BlueStacks.
  12. -
-

Using Steam or Epic Games store

-

Steam and Epic Games store are two of the most popular platforms for PC gaming. You can use either of them to buy and play Bloons TD 6 on your PC with enhanced graphics and controls. Here are the steps to do this:

-
    -
  1. Download and install Steam from its official website: https://store.steampowered.com/ or Epic Games store from its official website: https://www.epicgames.com/store/en-US/
  2. -
  3. Create an account and sign in to Steam or Epic Games store.
  4. -
  5. Go to the Store page and search for Bloons TD 6.
  6. -
  7. Click on the Add to Cart button and proceed to checkout.
  8. -
  9. Pay for the game using your preferred payment method.
  10. -
  11. Go to the Library page and click on the Bloons TD 6 icon to launch the game.
  12. -
  13. Enjoy playing Bloons TD 6 on your PC with Steam or Epic Games store.
  14. -
-

How to master Bloons TD 6?

-

Bloons TD 6 is a fun and challenging game that requires strategy, skill, and creativity. If you want to master the game and beat all the levels, modes, and challenges, you need to learn some tips and tricks that can help you improve your gameplay. Here are some of them:

-

The best strategies, tips, and tricks for Bloons TD 6

-

Bloons TD 6 is a game that has many variables and possibilities. There is no one best strategy or solution for every situation. However, there are some general principles and guidelines that can help you make better decisions and optimize your performance. Here are some of them:

-

Choosing the right monkeys, upgrades, and heroes

-

The first step to mastering Bloons TD 6 is choosing the right monkeys, upgrades, and heroes for each map, mode, and difficulty. You need to consider several factors such as the track layout, the bloon types, the available money, the tower restrictions, and the synergy between different towers. You also need to experiment with different combinations and see what works best for you.

-

Some of the most popular and effective monkeys in Bloons TD 6 are:

- -

Some of the most popular and effective heroes in Bloons TD 6 are:

- -

Placing your towers wisely and using abilities effectively

-

The second step to mastering Bloons TD 6 is placing your towers wisely and using their abilities effectively. You need to consider several factors such as the track layout, the line of sight, the range, the cost, the synergy, and the timing. You also need to experiment with different placements and see what works best for you.

-

Some of the general tips for placing your towers are:

- -

Some of the general tips for using your abilities are:

- -

Gaining experience and unlocking new content

-

The third step to mastering Bloons TD 6 is gaining experience and unlocking new content. You need to play the game regularly and complete different levels, modes, and challenges to earn experience points (XP) and monkey money (MM). You can use XP to unlock new towers, upgrades, heroes, maps, modes, and achievements. You can use MM to buy new powers, insta-monkeys, skins, trophies, and more. You can also earn trophies by completing achievements or participating in events. You can use trophies to buy cosmetic items from the trophy store.

-

Some of the ways to gain more XP and MM are:

- -

Conclusion

-

Bloons TD 6 is a fun and challenging tower defense game that offers hours of entertainment and replay value. You can enjoy the game on your mobile device or your PC, and you can customize your game experience with various features and options. You can also improve your skills and strategies by learning from the tips and tricks we shared in this article. We hope you found this article helpful and informative, and we wish you good luck and have fun popping bloons!

-

FAQs

-

Here are some of the frequently asked questions about Bloons TD 6:

-

197e85843d
-
-
\ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Baixe o Livro de Regras do RPG Ordem Paranormal criado por Cellbit e seus amigos.md b/spaces/1phancelerku/anime-remove-background/Baixe o Livro de Regras do RPG Ordem Paranormal criado por Cellbit e seus amigos.md deleted file mode 100644 index eee32dd6e44472a886106605b6ff2d1e320b6dd5..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Baixe o Livro de Regras do RPG Ordem Paranormal criado por Cellbit e seus amigos.md +++ /dev/null @@ -1,124 +0,0 @@ -
-

Livro de Regras RPG Cellbit PDF Download: Tudo o que você precisa saber

-

Você é fã do RPG Ordem Paranormal, criado pelo youtuber Cellbit? Quer saber como baixar o livro de regras oficial do jogo em PDF? Então você veio ao lugar certo. Neste artigo, vamos te contar tudo o que você precisa saber sobre o livro de regras RPG Cellbit PDF download, incluindo:

-

livro de regras rpg cellbit pdf download


DOWNLOAD ->>> https://jinyurl.com/2uNStv



- -

Ficou curioso? Então continue lendo e descubra tudo sobre o livro de regras RPG Cellbit PDF download.

-

O que é o RPG Ordem Paranormal e como jogar

-

O RPG Ordem Paranormal é um jogo de mesa criado por Rafael Lange, mais conhecido como Cellbit, um dos maiores streamers e youtubers do Brasil. O jogo se passa em um universo onde a realidade é ameaçada por criaturas sobrenaturais do Outro Lado, e os jogadores interpretam agentes da Ordem da Realidade, uma organização secreta que luta para proteger o mundo dos perigos paranormais.

-

O jogo é baseado no sistema d20, o mesmo usado em Dungeons & Dragons e Pathfinder, mas com algumas adaptações e inovações feitas por Cellbit e sua equipe. O jogo usa dados de 20 lados (d20) para resolver as ações dos personagens, e cada personagem tem atributos, habilidades, equipamentos e rituais que definem suas capacidades e características.

-

livro de regras rpg cellbit pdf grátis
-como baixar o livro de regras rpg cellbit
-ordem paranormal rpg livro de regras pdf
-livro de regras rpg cellbit online
-livro de regras rpg cellbit jambo editora
-resenha do livro de regras rpg cellbit
-livro de regras rpg cellbit capa dura
-livro de regras rpg cellbit versão final
-livro de regras rpg cellbit ficha de personagem
-livro de regras rpg cellbit preço
-livro de regras rpg cellbit comprar
-livro de regras rpg cellbit pré-venda
-livro de regras rpg cellbit lançamento
-livro de regras rpg cellbit sinopse
-livro de regras rpg cellbit pdf completo
-livro de regras rpg cellbit epub
-livro de regras rpg cellbit mobi
-livro de regras rpg cellbit amazon
-livro de regras rpg cellbit mercado livre
-livro de regras rpg cellbit submarino
-livro de regras rpg cellbit americanas
-livro de regras rpg cellbit saraiva
-livro de regras rpg cellbit cultura
-livro de regras rpg cellbit travessa
-livro de regras rpg cellbit martins fontes
-livro de regras rpg cellbit leitura
-livro de regras rpg cellbit pdf drive
-livro de regras rpg cellbit pdf mega
-livro de regras rpg cellbit pdf mediafire
-livro de regras rpg cellbit pdf 4shared
-livro de regras rpg cellbit pdf dropbox
-livro de regras rpg cellbit pdf archive.org
-livro de regras rpg cellbit pdf studocu
-livro de regras rpg cellbit pdf scribd
-livro de regras rpg cellbit pdf academia.edu
-resumo do livro de regras rpg cellbit pdf
-análise do livro de regras rpg cellbit pdf
-crítica do livro de regras rpg cellbit pdf
-opinião sobre o livro de regras rpg cellbit pdf
-comentários sobre o livro de regras rpg cellbit pdf
-dicas para o livro de regras rpg cellbit pdf
-guia para o livro de regras rpg cellbit pdf
-tutorial para o livro de regras rpg cellbit pdf
-vídeo sobre o livro de regras rpg cellbit pdf
-podcast sobre o livro de regras rpg cellbit pdf
-blog sobre o livro de regras rpg cellbit pdf
-site sobre o livro de regras rpg cellbit pdf
-fórum sobre o livro de regras rpg cellbit pdf
-grupo sobre o livro de regras rpg cellbit pdf
-comunidade sobre o livro de regras rpg cellbit pdf

-

O jogo também tem um forte foco na narrativa e na interpretação dos personagens, incentivando os jogadores a criarem histórias envolventes e imersivas. O jogo é dividido em capítulos, que são sessões de jogo com um objetivo definido. Cada capítulo pode ser jogado em uma ou mais sessões, dependendo do ritmo e do estilo dos jogadores e do mestre.

-

Como baixar o livro de regras oficial do jogo em PDF

-

O livro de regras oficial do RPG Ordem Paranormal foi produzido por Cellbit em parceria com a editora Jambô, e está disponível para compra no site da editora. O livro tem mais de 300 páginas e contém todas as regras para criar personagens, conduzir as aventuras, usar os rituais e enfrentar os inimigos do Outro Lado.

-

Além disso, o livro também traz informações sobre o cenário do jogo, incluindo a história da Ordem da Realidade, as principais organizações e facções do mundo, os tipos de criaturas sobrenaturais que existem no Outro Lado e as regiões onde as aventuras podem se passar.

-

Para baixar o livro de regras RPG Cellbit PDF download, você precisa comprar o livro físico no site da Jambô. Ao fazer isso, você recebe um link para baixar o arquivo em PDF do livro. Você pode ler o livro no seu computador, tablet ou celular, ou imprimir uma cópia para usar nas suas sessões de jogo.

-

Quais são as principais novidades e características do livro

-

O livro de regras RPG Cellbit PDF download traz várias novidades e características que tornam o jogo único e divertido. Algumas delas são:

- -

Onde assistir as campanhas de Cellbit e seus convidados

-

Se você quer se inspirar para jogar o RPG Ordem Paranormal ou simplesmente se divertir assistindo as aventuras de Cellbit e seus convidados, você pode acompanhar as campanhas que ele transmite em seu canal do YouTube. Até o momento, ele já realizou três campanhas completas:

- -

Você pode assistir todas essas campanhas no canal do Cellbit no YouTube, na playlist chamada RPG Ordem Paranormal. Você também pode conferir as artes, os memes, os comentários e as teorias dos fãs nas redes sociais, usando as hashtags #OrdemParanormal, #OSegredoNaFloresta, #VampiroAMascara e #Alice.

-

Como adaptar o cenário para outros sistemas de RPG

-

Se você gostou do cenário do RPG Ordem Paranormal, mas prefere usar outro sistema de RPG para jogar, não se preocupe. Você pode adaptar o cenário para o sistema que você quiser, usando algumas dicas simples:

- -

O importante é se divertir e aproveitar o cenário do RPG Ordem Paranormal da melhor forma possível.

-

Conclusão

-

Neste artigo, você aprendeu tudo o que precisa saber sobre o livro de regras RPG Cellbit PDF download. Você viu o que é o RPG Ordem Paranormal e como jogar, como baixar o livro de regras oficial do jogo em PDF, quais são as principais novidades e características do livro, onde assistir as campanhas de Cellbit e seus convidados e como adaptar o cenário para outros sistemas de RPG.

-

Agora você está pronto para se tornar um agente da Ordem da Realidade e viver aventuras incríveis no universo do RPG Ordem Paranormal. Esperamos que você tenha gostado deste artigo e que ele tenha sido útil para você. Se você tiver alguma dúvida ou sugestão, deixe um comentário abaixo. E se você quiser ler mais artigos sobre RPGs e outros assuntos interessantes, continue acompanhando o nosso blog.

-

FAQs

-

O que é o Outro Lado?

-

O Outro Lado é a dimensão paralela onde habitam as criaturas sobrenaturais que ameaçam a realidade. O Outro Lado é um lugar caótico, escuro e perigoso, onde as leis da física e da lógica não se aplicam. O Outro Lado pode ser acessado por meio de portais, rituais ou eventos anômalos, mas é um lugar hostil e mortal para os humanos.

-

Quem é Cellbit?

-

Cellbit é o criador do RPG Ordem Paranormal e um dos maiores streamers e youtubers do Brasil. Ele começou a fazer vídeos de jogos em 2012, e desde então ganhou milhões de fãs e seguidores. Ele é conhecido por seu humor, sua criatividade e sua paixão por RPGs. Ele também é o fundador da Cellbit Produções, uma empresa de entretenimento que produz conteúdo para diversas plataformas.

-

O que é a Jambô?

-

A Jambô é a editora parceira de Cellbit na produção do livro de regras RPG Cellbit PDF download. A Jambô é uma das maiores editoras de RPG do Brasil, responsável por publicar títulos como Tormenta, 3D&T, Dragon Age, Mutantes & Malfeitores, Reinos de Ferro e muitos outros. A Jambô também publica livros de ficção, quadrinhos e revistas especializadas em RPGs e cultura nerd.

-

Como jogar RPG online?

-

Se você quer jogar RPG online com seus amigos, existem várias ferramentas que podem te ajudar. Algumas delas são:

- -

Como aprender mais sobre RPGs?

-

Se você quer aprender mais sobre RPGs, existem várias fontes de informação e entretenimento que podem te ajudar. Algumas delas são:

-

401be4b1e0
-
-
\ No newline at end of file diff --git a/spaces/2023Liu2023/bingo/src/app/loading.css b/spaces/2023Liu2023/bingo/src/app/loading.css deleted file mode 100644 index eaaab6a86a228334c4eca3c5368ae6f0f593d405..0000000000000000000000000000000000000000 --- a/spaces/2023Liu2023/bingo/src/app/loading.css +++ /dev/null @@ -1,68 +0,0 @@ -::-webkit-scrollbar { - width: 10px; - height: 10px; - display: none; -} - -::-webkit-scrollbar-button:start:decrement, -::-webkit-scrollbar-button:end:increment { - height: 30px; - background-color: transparent; -} - -::-webkit-scrollbar-track-piece { - background-color: #3b3b3b; - -webkit-border-radius: 16px; -} - -::-webkit-scrollbar-thumb:vertical { - height: 50px; - background-color: #666; - border: 1px solid #eee; - -webkit-border-radius: 6px; -} - -/* loading start */ -.loading-spinner { - display: flex; - justify-content: center; - align-items: center; - height: 100vh; - opacity: 1; - transition: opacity .8s ease-out; -} - -.loading-spinner.hidden { - opacity: 0; -} - -.loading-spinner>div { - width: 30px; - height: 30px; - background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%); - - border-radius: 100%; - display: inline-block; - animation: sk-bouncedelay 1.4s infinite ease-in-out both; -} - -.loading-spinner .bounce1 { - animation-delay: -0.32s; -} - -.loading-spinner .bounce2 { - animation-delay: -0.16s; -} - -@keyframes sk-bouncedelay { - - 0%, - 80%, - 100% { - transform: scale(0); - } - - 40% { - transform: scale(1.0); - } -} diff --git a/spaces/AB-TW/team-ai/agents/tools/smart_domain/common.py b/spaces/AB-TW/team-ai/agents/tools/smart_domain/common.py deleted file mode 100644 index e266cc9da05dfeabc5b35e753d0d6c268df51e66..0000000000000000000000000000000000000000 --- a/spaces/AB-TW/team-ai/agents/tools/smart_domain/common.py +++ /dev/null @@ -1,18 +0,0 @@ -tech_prefix = """You are a software developer. 
{task} - -===TechStack -{tech_stack} -===END OF TechStack - -===Architecture -{architecture} -===END OF Architecture - -===TestStrategy -{test_strategy} -===END OF TestStrategy - -""" - -def getPrefix(task, tech_stack, architecture, test_strategy): - return tech_prefix.format(task=task, tech_stack=tech_stack, architecture=architecture, test_strategy=test_strategy) \ No newline at end of file diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/stft.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/stft.py deleted file mode 100644 index 2aa1ac89277734a6676c20a81bf88e21e8ca7aa9..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/audio/stft.py +++ /dev/null @@ -1,180 +0,0 @@ -import torch -import torch.nn.functional as F -import numpy as np -from scipy.signal import get_window -from librosa.util import pad_center, tiny -from librosa.filters import mel as librosa_mel_fn - -from audioldm.audio.audio_processing import ( - dynamic_range_compression, - dynamic_range_decompression, - window_sumsquare, -) - - -class STFT(torch.nn.Module): - """adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft""" - - def __init__(self, filter_length, hop_length, win_length, window="hann"): - super(STFT, self).__init__() - self.filter_length = filter_length - self.hop_length = hop_length - self.win_length = win_length - self.window = window - self.forward_transform = None - scale = self.filter_length / self.hop_length - fourier_basis = np.fft.fft(np.eye(self.filter_length)) - - cutoff = int((self.filter_length / 2 + 1)) - fourier_basis = np.vstack( - [np.real(fourier_basis[:cutoff, :]), np.imag(fourier_basis[:cutoff, :])] - ) - - forward_basis = torch.FloatTensor(fourier_basis[:, None, :]) - inverse_basis = torch.FloatTensor( - np.linalg.pinv(scale * fourier_basis).T[:, None, :] - ) - - if window is not None: - assert filter_length >= win_length - # get window and zero center pad it to filter_length - fft_window = get_window(window, win_length, fftbins=True) - fft_window = pad_center(fft_window, filter_length) - fft_window = torch.from_numpy(fft_window).float() - - # window the bases - forward_basis *= fft_window - inverse_basis *= fft_window - - self.register_buffer("forward_basis", forward_basis.float()) - self.register_buffer("inverse_basis", inverse_basis.float()) - - def transform(self, input_data): - num_batches = input_data.size(0) - num_samples = input_data.size(1) - - self.num_samples = num_samples - - # similar to librosa, reflect-pad the input - input_data = input_data.view(num_batches, 1, num_samples) - input_data = F.pad( - input_data.unsqueeze(1), - (int(self.filter_length / 2), int(self.filter_length / 2), 0, 0), - mode="reflect", - ) - input_data = input_data.squeeze(1) - - forward_transform = F.conv1d( - input_data, - torch.autograd.Variable(self.forward_basis, requires_grad=False), - stride=self.hop_length, - padding=0, - ).cpu() - - cutoff = int((self.filter_length / 2) + 1) - real_part = forward_transform[:, :cutoff, :] - imag_part = forward_transform[:, cutoff:, :] - - magnitude = torch.sqrt(real_part**2 + imag_part**2) - phase = torch.autograd.Variable(torch.atan2(imag_part.data, real_part.data)) - - return magnitude, phase - - def inverse(self, magnitude, phase): - recombine_magnitude_phase = torch.cat( - [magnitude * torch.cos(phase), magnitude * torch.sin(phase)], dim=1 - ) - - inverse_transform = F.conv_transpose1d( - recombine_magnitude_phase, - 
torch.autograd.Variable(self.inverse_basis, requires_grad=False), - stride=self.hop_length, - padding=0, - ) - - if self.window is not None: - window_sum = window_sumsquare( - self.window, - magnitude.size(-1), - hop_length=self.hop_length, - win_length=self.win_length, - n_fft=self.filter_length, - dtype=np.float32, - ) - # remove modulation effects - approx_nonzero_indices = torch.from_numpy( - np.where(window_sum > tiny(window_sum))[0] - ) - window_sum = torch.autograd.Variable( - torch.from_numpy(window_sum), requires_grad=False - ) - window_sum = window_sum - inverse_transform[:, :, approx_nonzero_indices] /= window_sum[ - approx_nonzero_indices - ] - - # scale by hop ratio - inverse_transform *= float(self.filter_length) / self.hop_length - - inverse_transform = inverse_transform[:, :, int(self.filter_length / 2) :] - inverse_transform = inverse_transform[:, :, : -int(self.filter_length / 2) :] - - return inverse_transform - - def forward(self, input_data): - self.magnitude, self.phase = self.transform(input_data) - reconstruction = self.inverse(self.magnitude, self.phase) - return reconstruction - - -class TacotronSTFT(torch.nn.Module): - def __init__( - self, - filter_length, - hop_length, - win_length, - n_mel_channels, - sampling_rate, - mel_fmin, - mel_fmax, - ): - super(TacotronSTFT, self).__init__() - self.n_mel_channels = n_mel_channels - self.sampling_rate = sampling_rate - self.stft_fn = STFT(filter_length, hop_length, win_length) - mel_basis = librosa_mel_fn( - sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax - ) - mel_basis = torch.from_numpy(mel_basis).float() - self.register_buffer("mel_basis", mel_basis) - - def spectral_normalize(self, magnitudes, normalize_fun): - output = dynamic_range_compression(magnitudes, normalize_fun) - return output - - def spectral_de_normalize(self, magnitudes): - output = dynamic_range_decompression(magnitudes) - return output - - def mel_spectrogram(self, y, normalize_fun=torch.log): - """Computes mel-spectrograms from a batch of waves - PARAMS - ------ - y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1] - - RETURNS - ------- - mel_output: torch.FloatTensor of shape (B, n_mel_channels, T) - """ - assert torch.min(y.data) >= -1, torch.min(y.data) - assert torch.max(y.data) <= 1, torch.max(y.data) - - magnitudes, phases = self.stft_fn.transform(y) - magnitudes = magnitudes.data - mel_output = torch.matmul(self.mel_basis, magnitudes) - mel_output = self.spectral_normalize(mel_output, normalize_fun) - energy = torch.norm(magnitudes, dim=1) - - log_magnitudes = self.spectral_normalize(magnitudes, normalize_fun) - - return mel_output, log_magnitudes, energy diff --git a/spaces/AP123/text-to-3D/README.md b/spaces/AP123/text-to-3D/README.md deleted file mode 100644 index c7ad6b136cb7b2effeb560b4d0ada2a71b75783a..0000000000000000000000000000000000000000 --- a/spaces/AP123/text-to-3D/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Point-e Demo -emoji: 🐢 -colorFrom: yellow -colorTo: blue -sdk: gradio -sdk_version: 3.14.0 -app_file: app.py -pinned: false -duplicated_from: anzorq/point-e_demo ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Perspective.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Perspective.js deleted file mode 100644 index 1fb762793ce3386c1b8e49396946c4471f9160a1..0000000000000000000000000000000000000000 --- 
a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspective/Perspective.js +++ /dev/null @@ -1,2 +0,0 @@ -import { ContainerPerspective } from '../../../plugins/perspectiveimage.js'; -export default ContainerPerspective; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/ScrollBar.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/ScrollBar.d.ts deleted file mode 100644 index 724afe4f0bfb534c903b6233da044cf3e8dd6302..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollbar/ScrollBar.d.ts +++ /dev/null @@ -1,67 +0,0 @@ -// import * as Phaser from 'phaser'; -import Sizer from '../sizer/Sizer'; -import RoundRecrangle from '../../../plugins/roundrectangle'; - -export default ScrollBar; - -declare namespace ScrollBar { - - type SliderInputTypes = 0 | 1 | -1 | 'drag' | 'pan' | 'click' | 'none'; - - interface IConfig extends Sizer.IConfig { - space?: { - left?: number, - right?: number, - top?: number, - bottom?: number, - }, - - background?: Phaser.GameObjects.GameObject, - - buttons?: { - top?: Phaser.GameObjects.GameObject, - bottom?: Phaser.GameObjects.GameObject, - left?: Phaser.GameObjects.GameObject, - right?: Phaser.GameObjects.GameObject, - - step?: number, - }, - - slider?: { - background?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig, - track?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig, - indicator?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig, - thumb?: Phaser.GameObjects.GameObject | RoundRecrangle.IConfig, - input?: SliderInputTypes, - gap?: number, - easeValue?: { - duration?: number, - ease?: string - }, - } - - valuechangeCallback?: (newValue: number, oldValue: number, ScrollBar: ScrollBar) => void, - - enable?: boolean, - } -} - -declare class ScrollBar extends Sizer { - constructor( - scene: Phaser.Scene, - config?: ScrollBar.IConfig - ); - - value: number; - getValue(min?: number, max?: number): number; - setValue(value?: number, min?: number, max?: number): this; - addValue(inc?: number, min?: number, max?: number): this; - - easeValueTo(value?: number, min?: number, max?: number): this; - stopEaseValue(): this; - setEaseValueDuration(duration: number): this; - setEaseValueFunction(ease: string): this; - - setEnable(enable?: boolean): this; - enable: boolean; -} \ No newline at end of file diff --git a/spaces/Algoworks/Image_Face_Upscale_Restoration-GFPGAN_pub/app.py b/spaces/Algoworks/Image_Face_Upscale_Restoration-GFPGAN_pub/app.py deleted file mode 100644 index 67fcac0171bbb77d2b1d3b23b7293635b6297e28..0000000000000000000000000000000000000000 --- a/spaces/Algoworks/Image_Face_Upscale_Restoration-GFPGAN_pub/app.py +++ /dev/null @@ -1,142 +0,0 @@ -import os - -import cv2 -import gradio as gr -import torch -from basicsr.archs.srvgg_arch import SRVGGNetCompact -from gfpgan.utils import GFPGANer -from realesrgan.utils import RealESRGANer - -os.system("pip freeze") -# download weights -if not os.path.exists('realesr-general-x4v3.pth'): - os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P .") -if not os.path.exists('GFPGANv1.2.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P .") -if not os.path.exists('GFPGANv1.3.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P .") -if not 
os.path.exists('GFPGANv1.4.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .") -if not os.path.exists('RestoreFormer.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth -P .") -if not os.path.exists('CodeFormer.pth'): - os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/CodeFormer.pth -P .") - -torch.hub.download_url_to_file( - 'https://thumbs.dreamstime.com/b/tower-bridge-traditional-red-bus-black-white-colors-view-to-tower-bridge-london-black-white-colors-108478942.jpg', - 'a1.jpg') -torch.hub.download_url_to_file( - 'https://media.istockphoto.com/id/523514029/photo/london-skyline-b-w.jpg?s=612x612&w=0&k=20&c=kJS1BAtfqYeUDaORupj0sBPc1hpzJhBUUqEFfRnHzZ0=', - 'a2.jpg') -torch.hub.download_url_to_file( - 'https://i.guim.co.uk/img/media/06f614065ed82ca0e917b149a32493c791619854/0_0_3648_2789/master/3648.jpg?width=700&quality=85&auto=format&fit=max&s=05764b507c18a38590090d987c8b6202', - 'a3.jpg') -torch.hub.download_url_to_file( - 'https://i.pinimg.com/736x/46/96/9e/46969eb94aec2437323464804d27706d--victorian-london-victorian-era.jpg', - 'a4.jpg') - -# background enhancer with RealESRGAN -model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu') -model_path = 'realesr-general-x4v3.pth' -half = True if torch.cuda.is_available() else False -upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half) - -os.makedirs('output', exist_ok=True) - - -# def inference(img, version, scale, weight): -def inference(img, version, scale): - # weight /= 100 - print(img, version, scale) - try: - extension = os.path.splitext(os.path.basename(str(img)))[1] - img = cv2.imread(img, cv2.IMREAD_UNCHANGED) - if len(img.shape) == 3 and img.shape[2] == 4: - img_mode = 'RGBA' - elif len(img.shape) == 2: # for gray inputs - img_mode = None - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - else: - img_mode = None - - h, w = img.shape[0:2] - if h < 300: - img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4) - - if version == 'v1.2': - face_enhancer = GFPGANer( - model_path='GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'v1.3': - face_enhancer = GFPGANer( - model_path='GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'v1.4': - face_enhancer = GFPGANer( - model_path='GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'RestoreFormer': - face_enhancer = GFPGANer( - model_path='RestoreFormer.pth', upscale=2, arch='RestoreFormer', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'CodeFormer': - face_enhancer = GFPGANer( - model_path='CodeFormer.pth', upscale=2, arch='CodeFormer', channel_multiplier=2, bg_upsampler=upsampler) - elif version == 'RealESR-General-x4v3': - face_enhancer = GFPGANer( - model_path='realesr-general-x4v3.pth', upscale=2, arch='realesr-general', channel_multiplier=2, bg_upsampler=upsampler) - - try: - # _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True, weight=weight) - _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) - except RuntimeError as error: - print('Error', error) - - try: - if scale != 2: - interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4 - h, 
w = img.shape[0:2] - output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation) - except Exception as error: - print('wrong scale input.', error) - if img_mode == 'RGBA': # RGBA images should be saved in png format - extension = 'png' - else: - extension = 'jpg' - save_path = f'output/out.{extension}' - cv2.imwrite(save_path, output) - - output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB) - return output, save_path - except Exception as error: - print('global exception', error) - return None, None - - -title = "Image Upscaling & Restoration (esp. Face) using the GFPGAN Algorithm" -description = r"""Gradio demo for GFPGAN: Towards Real-World Blind Face Restoration and Upscaling of the image with a Generative Facial Prior.<br>
-In practice, the algorithm is used to restore your **old photos** or improve **AI-generated faces**.<br>
-To use it, simply upload the image you want to restore.<br>
-""" -article = r""" -[![download](https://img.shields.io/github/downloads/TencentARC/GFPGAN/total.svg)](https://github.com/TencentARC/GFPGAN/releases) -[![GitHub Stars](https://img.shields.io/github/stars/TencentARC/GFPGAN?style=social)](https://github.com/TencentARC/GFPGAN) -[![arXiv](https://img.shields.io/badge/arXiv-Paper-.svg)](https://arxiv.org/abs/2101.04061) -
-""" -demo = gr.Interface( - inference, [ - gr.inputs.Image(type="filepath", label="Input"), - # gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer', 'CodeFormer'], type="value", default='v1.4', label='version'), - gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer','CodeFormer','RealESR-General-x4v3'], type="value", default='v1.4', label='version'), - gr.inputs.Number(label="Rescaling factor", default=2), - # gr.Slider(0, 100, label='Weight, only for CodeFormer. 0 for better quality, 100 for better identity', default=50) - ], [ - gr.outputs.Image(type="numpy", label="Output (The whole image)"), - gr.outputs.File(label="Download the output image") - ], - title=title, - description=description, - article=article, - # examples=[['AI-generate.jpg', 'v1.4', 2, 50], ['lincoln.jpg', 'v1.4', 2, 50], ['Blake_Lively.jpg', 'v1.4', 2, 50], - # ['10045.png', 'v1.4', 2, 50]]).launch() - examples=[['a1.jpg', 'v1.4', 2], ['a2.jpg', 'v1.4', 2], ['a3.jpg', 'v1.4', 2],['a4.jpg', 'v1.4', 2]]) - -demo.queue(concurrency_count=4) -demo.launch() \ No newline at end of file diff --git "a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" "b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" deleted file mode 100644 index ffbb05599ef09c9de25334ebeca2eef8022b9aaf..0000000000000000000000000000000000000000 --- "a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" +++ /dev/null @@ -1,160 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - -fast_debug = False - -def readPdf(pdfPath): - """ - 读取pdf文件,返回文本内容 - """ - import pdfminer - from pdfminer.pdfparser import PDFParser - from pdfminer.pdfdocument import PDFDocument - from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed - from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter - from pdfminer.pdfdevice import PDFDevice - from pdfminer.layout import LAParams - from pdfminer.converter import PDFPageAggregator - - fp = open(pdfPath, 'rb') - - # Create a PDF parser object associated with the file object - parser = PDFParser(fp) - - # Create a PDF document object that stores the document structure. - # Password for initialization as 2nd parameter - document = PDFDocument(parser) - # Check if the document allows text extraction. If not, abort. - if not document.is_extractable: - raise PDFTextExtractionNotAllowed - - # Create a PDF resource manager object that stores shared resources. - rsrcmgr = PDFResourceManager() - - # Create a PDF device object. - # device = PDFDevice(rsrcmgr) - - # BEGIN LAYOUT ANALYSIS. - # Set parameters for analysis. - laparams = LAParams( - char_margin=10.0, - line_margin=0.2, - boxes_flow=0.2, - all_texts=False, - ) - # Create a PDF page aggregator object. - device = PDFPageAggregator(rsrcmgr, laparams=laparams) - # Create a PDF interpreter object. 
- interpreter = PDFPageInterpreter(rsrcmgr, device) - - # loop over all pages in the document - outTextList = [] - for page in PDFPage.create_pages(document): - # read the page into a layout object - interpreter.process_page(page) - layout = device.get_result() - for obj in layout._objs: - if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal): - # print(obj.get_text()) - outTextList.append(obj.get_text()) - - return outTextList - - -def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, glob, os - from bs4 import BeautifulSoup - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - if ".tex" in fp: - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - if ".pdf" in fp.lower(): - file_content = readPdf(fp) - file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk') - - prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" - i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) # 带超时倒计时 - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=history, - sys_prompt="总结文章。" - ) # 带超时倒计时 - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - - -@CatchException -def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import pdfminer, bs4 - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - 
yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - diff --git a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/callback/__init__.py b/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/callback/__init__.py deleted file mode 100644 index ccab5ae57a2d832309c4e72fcc51e5e2ad48a59b..0000000000000000000000000000000000000000 --- a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/callback/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -from pytorch_lightning.callbacks import ( - ModelCheckpoint, - LearningRateMonitor, - EarlyStopping, -) -from src.utils.registry import Registry - -# from src.callback.visualizer_callbacks import VisualizerCallback - -CALLBACK_REGISTRY = Registry("CALLBACK") - -CALLBACK_REGISTRY.register(EarlyStopping) -CALLBACK_REGISTRY.register(ModelCheckpoint) -CALLBACK_REGISTRY.register(LearningRateMonitor) -# TODO: add WandB visualizer callback -# CALLBACK_REGISTRY.register(VisualizerCallback) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README_sdxl.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README_sdxl.md deleted file mode 100644 index 7dcde78f2cfdcd2876686f865c57be8b930dab41..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README_sdxl.md +++ /dev/null @@ -1,189 +0,0 @@ -# DreamBooth training example for Stable Diffusion XL (SDXL) - -[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text2image models like stable diffusion given just a few (3~5) images of a subject. - -The `train_dreambooth_lora_sdxl.py` script shows how to implement the training procedure and adapt it for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952). - -> 💡 **Note**: For now, we only allow DreamBooth fine-tuning of the SDXL UNet via LoRA. LoRA is a parameter-efficient fine-tuning technique introduced in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*. - -## Running locally with PyTorch - -### Installing the dependencies - -Before running the scripts, make sure to install the library's training dependencies: - -**Important** - -To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: - -```bash -git clone https://github.com/huggingface/diffusers -cd diffusers -pip install -e . 
-``` - -Then cd in the `examples/dreambooth` folder and run -```bash -pip install -r requirements_sdxl.txt -``` - -And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: - -```bash -accelerate config -``` - -Or for a default accelerate configuration without answering questions about your environment - -```bash -accelerate config default -``` - -Or if your environment doesn't support an interactive shell (e.g., a notebook) - -```python -from accelerate.utils import write_basic_config -write_basic_config() -``` - -When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups. - -### Dog toy example - -Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example. - -Let's first download it locally: - -```python -from huggingface_hub import snapshot_download - -local_dir = "./dog" -snapshot_download( - "diffusers/dog-example", - local_dir=local_dir, repo_type="dataset", - ignore_patterns=".gitattributes", -) -``` - -Since SDXL 0.9 weights are gated, we need to be authenticated to be able to use them. So, let's run: - -```bash -huggingface-cli login -``` - -This will also allow us to push the trained LoRA parameters to the Hugging Face Hub platform. - -Now, we can launch training using: - -```bash -export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0" -export INSTANCE_DIR="dog" -export OUTPUT_DIR="lora-trained-xl" - -accelerate launch train_dreambooth_lora_sdxl.py \ - --pretrained_model_name_or_path=$MODEL_NAME \ - --instance_data_dir=$INSTANCE_DIR \ - --output_dir=$OUTPUT_DIR \ - --mixed_precision="fp16" \ - --instance_prompt="a photo of sks dog" \ - --resolution=1024 \ - --train_batch_size=1 \ - --gradient_accumulation_steps=4 \ - --learning_rate=1e-4 \ - --report_to="wandb" \ - --lr_scheduler="constant" \ - --lr_warmup_steps=0 \ - --max_train_steps=500 \ - --validation_prompt="A photo of sks dog in a bucket" \ - --validation_epochs=25 \ - --seed="0" \ - --push_to_hub -``` - -To better track our training experiments, we're using the following flags in the command above: - -* `report_to="wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`. -* `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected. - -Our experiments were conducted on a single 40GB A100 GPU. 
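
Before moving on to inference, you can sanity-check that training actually wrote LoRA weights into the output directory. This is a minimal sketch rather than part of the official instructions; the exact filename the script writes (for example `pytorch_lora_weights.safetensors`) may differ between `diffusers` versions:

```python
from pathlib import Path

output_dir = Path("lora-trained-xl")  # same directory as OUTPUT_DIR above

# Collect any serialized LoRA weight files produced by the training script.
weights = sorted(output_dir.glob("pytorch_lora_weights.*")) or sorted(output_dir.glob("*.safetensors"))
print("LoRA weight files:", [p.name for p in weights] or "none found yet")
```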
- -### Inference - -Once training is done, we can perform inference like so: - -```python -from huggingface_hub.repocard import RepoCard -from diffusers import DiffusionPipeline -import torch - -lora_model_id = <"lora-sdxl-dreambooth-id"> -card = RepoCard.load(lora_model_id) -base_model_id = card.data.to_dict()["base_model"] - -pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16) -pipe = pipe.to("cuda") -pipe.load_lora_weights(lora_model_id) -image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0] -image.save("sks_dog.png") -``` - -We can further refine the outputs with the [Refiner](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0): - -```python -from huggingface_hub.repocard import RepoCard -from diffusers import DiffusionPipeline, StableDiffusionXLImg2ImgPipeline -import torch - -lora_model_id = <"lora-sdxl-dreambooth-id"> -card = RepoCard.load(lora_model_id) -base_model_id = card.data.to_dict()["base_model"] - -# Load the base pipeline and load the LoRA parameters into it. -pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16) -pipe = pipe.to("cuda") -pipe.load_lora_weights(lora_model_id) - -# Load the refiner. -refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained( - "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16" -) -refiner.to("cuda") - -prompt = "A picture of a sks dog in a bucket" -generator = torch.Generator("cuda").manual_seed(0) - -# Run inference. -image = pipe(prompt=prompt, output_type="latent", generator=generator).images[0] -image = refiner(prompt=prompt, image=image[None, :], generator=generator).images[0] -image.save("refined_sks_dog.png") -``` - -Here's a side-by-side comparison of the with and without Refiner pipeline outputs: - -| Without Refiner | With Refiner | -|---|---| -| ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/sks_dog.png) | ![](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/sd_xl/refined_sks_dog.png) | - -### Training with text encoder(s) - -Alongside the UNet, LoRA fine-tuning of the text encoders is also supported. To do so, just specify `--train_text_encoder` while launching training. Please keep the following points in mind: - -* SDXL has two text encoders. So, we fine-tune both using LoRA. -* When not fine-tuning the text encoders, we ALWAYS precompute the text embeddings to save memory. - -### Specifying a better VAE - -SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument namely `--pretrained_vae_model_name_or_path` that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)). - -## Notes - -In our experiments, we found that SDXL yields good initial results without extensive hyperparameter tuning. For example, without fine-tuning the text encoders and without using prior-preservation, we observed decent results. We didn't explore further hyper-parameter tuning experiments, but we do encourage the community to explore this avenue further and share their results with us 🤗 - -## Results - -You can explore the results from a couple of our internal experiments by checking out this link: [https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl](https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl). 
Specifically, we used the same script with the exact same hyperparameters on the following datasets: - -* [Dogs](https://huggingface.co/datasets/diffusers/dog-example) -* [Starbucks logo](https://huggingface.co/datasets/diffusers/starbucks-example) -* [Mr. Potato Head](https://huggingface.co/datasets/diffusers/potato-head-example) -* [Keramer face](https://huggingface.co/datasets/diffusers/keramer-face-example) diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py deleted file mode 100644 index b7829f76ec12f946490618e0d03857777efdf219..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py +++ /dev/null @@ -1,1196 +0,0 @@ -import math -from typing import Optional, Union - -import torch -from torch import nn - -from ...configuration_utils import ConfigMixin, register_to_config -from ...models import ModelMixin -from ...models.attention import AdaLayerNorm, FeedForward -from ...models.attention_processor import Attention -from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed -from ...models.transformer_2d import Transformer2DModelOutput -from ...utils import logging - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -def _no_grad_trunc_normal_(tensor, mean, std, a, b): - # Cut & paste from PyTorch official master until it's in a few official releases - RW - # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0 - - if (mean < a - 2 * std) or (mean > b + 2 * std): - logger.warning( - "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. " - "The distribution of values may be incorrect." - ) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - l = norm_cdf((a - mean) / std) - u = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. - tensor.uniform_(2 * l - 1, 2 * u - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.0)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0): - # type: (torch.Tensor, float, float, float, float) -> torch.Tensor - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, - \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for - generating the random values works best when :math:`a \leq \text{mean} \leq b`. 
- - Args: - tensor: an n-dimensional `torch.Tensor` - mean: the mean of the normal distribution - std: the standard deviation of the normal distribution - a: the minimum cutoff value - b: the maximum cutoff value - Examples: - >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w) - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) - - -class PatchEmbed(nn.Module): - """2D Image to Patch Embedding""" - - def __init__( - self, - height=224, - width=224, - patch_size=16, - in_channels=3, - embed_dim=768, - layer_norm=False, - flatten=True, - bias=True, - use_pos_embed=True, - ): - super().__init__() - - num_patches = (height // patch_size) * (width // patch_size) - self.flatten = flatten - self.layer_norm = layer_norm - - self.proj = nn.Conv2d( - in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias - ) - if layer_norm: - self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6) - else: - self.norm = None - - self.use_pos_embed = use_pos_embed - if self.use_pos_embed: - pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5)) - self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False) - - def forward(self, latent): - latent = self.proj(latent) - if self.flatten: - latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC - if self.layer_norm: - latent = self.norm(latent) - if self.use_pos_embed: - return latent + self.pos_embed - else: - return latent - - -class SkipBlock(nn.Module): - def __init__(self, dim: int): - super().__init__() - - self.skip_linear = nn.Linear(2 * dim, dim) - - # Use torch.nn.LayerNorm for now, following the original code - self.norm = nn.LayerNorm(dim) - - def forward(self, x, skip): - x = self.skip_linear(torch.cat([x, skip], dim=-1)) - x = self.norm(x) - - return x - - -# Modified to support both pre-LayerNorm and post-LayerNorm configurations -# Don't support AdaLayerNormZero for now -# Modified from diffusers.models.attention.BasicTransformerBlock -class UTransformerBlock(nn.Module): - r""" - A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations. - - Parameters: - dim (`int`): The number of channels in the input and output. - num_attention_heads (`int`): The number of heads to use for multi-head attention. - attention_head_dim (`int`): The number of channels in each head. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. - cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. - activation_fn (`str`, *optional*, defaults to `"geglu"`): - Activation function to be used in feed-forward. - num_embeds_ada_norm (:obj: `int`, *optional*): - The number of diffusion steps used during training. See `Transformer2DModel`. - attention_bias (:obj: `bool`, *optional*, defaults to `False`): - Configure if the attentions should contain a bias parameter. - only_cross_attention (`bool`, *optional*): - Whether to use only cross-attention layers. In this case two cross attention layers are used. - double_self_attention (`bool`, *optional*): - Whether to use two self-attention layers. In this case no cross attention layers are used. - upcast_attention (`bool`, *optional*): - Whether to upcast the query and key to float32 when performing the attention calculation. - norm_elementwise_affine (`bool`, *optional*): - Whether to use learnable per-element affine parameters during layer normalization. 
- norm_type (`str`, defaults to `"layer_norm"`): - The layer norm implementation to use. - pre_layer_norm (`bool`, *optional*): - Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), - as opposed to after ("post-LayerNorm"). Note that `BasicTransformerBlock` uses pre-LayerNorm, e.g. - `pre_layer_norm = True`. - final_dropout (`bool`, *optional*): - Whether to use a final Dropout layer after the feedforward network. - """ - - def __init__( - self, - dim: int, - num_attention_heads: int, - attention_head_dim: int, - dropout=0.0, - cross_attention_dim: Optional[int] = None, - activation_fn: str = "geglu", - num_embeds_ada_norm: Optional[int] = None, - attention_bias: bool = False, - only_cross_attention: bool = False, - double_self_attention: bool = False, - upcast_attention: bool = False, - norm_elementwise_affine: bool = True, - norm_type: str = "layer_norm", - pre_layer_norm: bool = True, - final_dropout: bool = False, - ): - super().__init__() - self.only_cross_attention = only_cross_attention - - self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" - - self.pre_layer_norm = pre_layer_norm - - if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: - raise ValueError( - f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" - f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." - ) - - # 1. Self-Attn - self.attn1 = Attention( - query_dim=dim, - heads=num_attention_heads, - dim_head=attention_head_dim, - dropout=dropout, - bias=attention_bias, - cross_attention_dim=cross_attention_dim if only_cross_attention else None, - upcast_attention=upcast_attention, - ) - - # 2. Cross-Attn - if cross_attention_dim is not None or double_self_attention: - self.attn2 = Attention( - query_dim=dim, - cross_attention_dim=cross_attention_dim if not double_self_attention else None, - heads=num_attention_heads, - dim_head=attention_head_dim, - dropout=dropout, - bias=attention_bias, - upcast_attention=upcast_attention, - ) # is self-attn if encoder_hidden_states is none - else: - self.attn2 = None - - if self.use_ada_layer_norm: - self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) - else: - self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) - - if cross_attention_dim is not None or double_self_attention: - # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. - # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during - # the second cross attention block. - self.norm2 = ( - AdaLayerNorm(dim, num_embeds_ada_norm) - if self.use_ada_layer_norm - else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) - ) - else: - self.norm2 = None - - # 3. Feed-forward - self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) - self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) - - def forward( - self, - hidden_states, - attention_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - timestep=None, - cross_attention_kwargs=None, - class_labels=None, - ): - # Pre-LayerNorm - if self.pre_layer_norm: - if self.use_ada_layer_norm: - norm_hidden_states = self.norm1(hidden_states, timestep) - else: - norm_hidden_states = self.norm1(hidden_states) - else: - norm_hidden_states = hidden_states - - # 1. 
Self-Attention - cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} - attn_output = self.attn1( - norm_hidden_states, - encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, - attention_mask=attention_mask, - **cross_attention_kwargs, - ) - - # Post-LayerNorm - if not self.pre_layer_norm: - if self.use_ada_layer_norm: - attn_output = self.norm1(attn_output, timestep) - else: - attn_output = self.norm1(attn_output) - - hidden_states = attn_output + hidden_states - - if self.attn2 is not None: - # Pre-LayerNorm - if self.pre_layer_norm: - norm_hidden_states = ( - self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) - ) - else: - norm_hidden_states = hidden_states - # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly - # prepare attention mask here - - # 2. Cross-Attention - attn_output = self.attn2( - norm_hidden_states, - encoder_hidden_states=encoder_hidden_states, - attention_mask=encoder_attention_mask, - **cross_attention_kwargs, - ) - - # Post-LayerNorm - if not self.pre_layer_norm: - attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output) - - hidden_states = attn_output + hidden_states - - # 3. Feed-forward - # Pre-LayerNorm - if self.pre_layer_norm: - norm_hidden_states = self.norm3(hidden_states) - else: - norm_hidden_states = hidden_states - - ff_output = self.ff(norm_hidden_states) - - # Post-LayerNorm - if not self.pre_layer_norm: - ff_output = self.norm3(ff_output) - - hidden_states = ff_output + hidden_states - - return hidden_states - - -# Like UTransformerBlock except with LayerNorms on the residual backbone of the block -# Modified from diffusers.models.attention.BasicTransformerBlock -class UniDiffuserBlock(nn.Module): - r""" - A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations and puts the - LayerNorms on the residual backbone of the block. This matches the transformer block in the [original UniDiffuser - implementation](https://github.com/thu-ml/unidiffuser/blob/main/libs/uvit_multi_post_ln_v1.py#L104). - - Parameters: - dim (`int`): The number of channels in the input and output. - num_attention_heads (`int`): The number of heads to use for multi-head attention. - attention_head_dim (`int`): The number of channels in each head. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. - cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. - activation_fn (`str`, *optional*, defaults to `"geglu"`): - Activation function to be used in feed-forward. - num_embeds_ada_norm (:obj: `int`, *optional*): - The number of diffusion steps used during training. See `Transformer2DModel`. - attention_bias (:obj: `bool`, *optional*, defaults to `False`): - Configure if the attentions should contain a bias parameter. - only_cross_attention (`bool`, *optional*): - Whether to use only cross-attention layers. In this case two cross attention layers are used. - double_self_attention (`bool`, *optional*): - Whether to use two self-attention layers. In this case no cross attention layers are used. - upcast_attention (`bool`, *optional*): - Whether to upcast the query and key to float() when performing the attention calculation. - norm_elementwise_affine (`bool`, *optional*): - Whether to use learnable per-element affine parameters during layer normalization. 
- norm_type (`str`, defaults to `"layer_norm"`): - The layer norm implementation to use. - pre_layer_norm (`bool`, *optional*): - Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), - as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm - (`pre_layer_norm = False`). - final_dropout (`bool`, *optional*): - Whether to use a final Dropout layer after the feedforward network. - """ - - def __init__( - self, - dim: int, - num_attention_heads: int, - attention_head_dim: int, - dropout=0.0, - cross_attention_dim: Optional[int] = None, - activation_fn: str = "geglu", - num_embeds_ada_norm: Optional[int] = None, - attention_bias: bool = False, - only_cross_attention: bool = False, - double_self_attention: bool = False, - upcast_attention: bool = False, - norm_elementwise_affine: bool = True, - norm_type: str = "layer_norm", - pre_layer_norm: bool = False, - final_dropout: bool = True, - ): - super().__init__() - self.only_cross_attention = only_cross_attention - - self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" - - self.pre_layer_norm = pre_layer_norm - - if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: - raise ValueError( - f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" - f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." - ) - - # 1. Self-Attn - self.attn1 = Attention( - query_dim=dim, - heads=num_attention_heads, - dim_head=attention_head_dim, - dropout=dropout, - bias=attention_bias, - cross_attention_dim=cross_attention_dim if only_cross_attention else None, - upcast_attention=upcast_attention, - ) - - # 2. Cross-Attn - if cross_attention_dim is not None or double_self_attention: - self.attn2 = Attention( - query_dim=dim, - cross_attention_dim=cross_attention_dim if not double_self_attention else None, - heads=num_attention_heads, - dim_head=attention_head_dim, - dropout=dropout, - bias=attention_bias, - upcast_attention=upcast_attention, - ) # is self-attn if encoder_hidden_states is none - else: - self.attn2 = None - - if self.use_ada_layer_norm: - self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) - else: - self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) - - if cross_attention_dim is not None or double_self_attention: - # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. - # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during - # the second cross attention block. - self.norm2 = ( - AdaLayerNorm(dim, num_embeds_ada_norm) - if self.use_ada_layer_norm - else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) - ) - else: - self.norm2 = None - - # 3. 
Feed-forward - self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) - self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) - - def forward( - self, - hidden_states, - attention_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - timestep=None, - cross_attention_kwargs=None, - class_labels=None, - ): - # Following the diffusers transformer block implementation, put the LayerNorm on the - # residual backbone - # Pre-LayerNorm - if self.pre_layer_norm: - if self.use_ada_layer_norm: - hidden_states = self.norm1(hidden_states, timestep) - else: - hidden_states = self.norm1(hidden_states) - - # 1. Self-Attention - cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} - attn_output = self.attn1( - hidden_states, - encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, - attention_mask=attention_mask, - **cross_attention_kwargs, - ) - - hidden_states = attn_output + hidden_states - - # Following the diffusers transformer block implementation, put the LayerNorm on the - # residual backbone - # Post-LayerNorm - if not self.pre_layer_norm: - if self.use_ada_layer_norm: - hidden_states = self.norm1(hidden_states, timestep) - else: - hidden_states = self.norm1(hidden_states) - - if self.attn2 is not None: - # Pre-LayerNorm - if self.pre_layer_norm: - hidden_states = ( - self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) - ) - # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly - # prepare attention mask here - - # 2. Cross-Attention - attn_output = self.attn2( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - attention_mask=encoder_attention_mask, - **cross_attention_kwargs, - ) - - hidden_states = attn_output + hidden_states - - # Post-LayerNorm - if not self.pre_layer_norm: - hidden_states = ( - self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) - ) - - # 3. Feed-forward - # Pre-LayerNorm - if self.pre_layer_norm: - hidden_states = self.norm3(hidden_states) - - ff_output = self.ff(hidden_states) - - hidden_states = ff_output + hidden_states - - # Post-LayerNorm - if not self.pre_layer_norm: - hidden_states = self.norm3(hidden_states) - - return hidden_states - - -# Modified from diffusers.models.transformer_2d.Transformer2DModel -# Modify the transformer block structure to be U-Net like following U-ViT -# Only supports patch-style input and torch.nn.LayerNorm currently -# https://github.com/baofff/U-ViT -class UTransformer2DModel(ModelMixin, ConfigMixin): - """ - Transformer model based on the [U-ViT](https://github.com/baofff/U-ViT) architecture for image-like data. Compared - to [`Transformer2DModel`], this model has skip connections between transformer blocks in a "U"-shaped fashion, - similar to a U-Net. Supports only continuous (actual embeddings) inputs, which are embedded via a [`PatchEmbed`] - layer and then reshaped to (b, t, d). - - Parameters: - num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. - attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. - in_channels (`int`, *optional*): - Pass if the input is continuous. The number of channels in the input. - out_channels (`int`, *optional*): - The number of output channels; if `None`, defaults to `in_channels`. 
- num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. - norm_num_groups (`int`, *optional*, defaults to `32`): - The number of groups to use when performing Group Normalization. - cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. - attention_bias (`bool`, *optional*): - Configure if the TransformerBlocks' attention should contain a bias parameter. - sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. - Note that this is fixed at training time as it is used for learning a number of position embeddings. See - `ImagePositionalEmbeddings`. - num_vector_embeds (`int`, *optional*): - Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. - Includes the class for the masked latent pixel. - patch_size (`int`, *optional*, defaults to 2): - The patch size to use in the patch embedding. - activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. - num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. - The number of diffusion steps used during training. Note that this is fixed at training time as it is used - to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for - up to but not more than steps than `num_embeds_ada_norm`. - use_linear_projection (int, *optional*): TODO: Not used - only_cross_attention (`bool`, *optional*): - Whether to use only cross-attention layers. In this case two cross attention layers are used in each - transformer block. - upcast_attention (`bool`, *optional*): - Whether to upcast the query and key to float() when performing the attention calculation. - norm_type (`str`, *optional*, defaults to `"layer_norm"`): - The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`. - block_type (`str`, *optional*, defaults to `"unidiffuser"`): - The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual - backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard - behavior in `diffusers`.) - pre_layer_norm (`bool`, *optional*): - Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), - as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm - (`pre_layer_norm = False`). - norm_elementwise_affine (`bool`, *optional*): - Whether to use learnable per-element affine parameters during layer normalization. - use_patch_pos_embed (`bool`, *optional*): - Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`). - final_dropout (`bool`, *optional*): - Whether to use a final Dropout layer after the feedforward network. 
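    Example:
        A minimal instantiation sketch; the hyperparameters below are illustrative only and are
        not taken from any released checkpoint:

            import torch

            model = UTransformer2DModel(
                num_attention_heads=2,
                attention_head_dim=8,
                in_channels=4,
                num_layers=3,   # one "down" block, one mid block, one "up" block with a skip connection
                patch_size=2,
                sample_size=8,
            )
            sample = torch.randn(1, 4, 8, 8)
            output = model(sample, return_dict=False)[0]  # shape (1, 4, 8, 8), same spatial size as the input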
- """ - - @register_to_config - def __init__( - self, - num_attention_heads: int = 16, - attention_head_dim: int = 88, - in_channels: Optional[int] = None, - out_channels: Optional[int] = None, - num_layers: int = 1, - dropout: float = 0.0, - norm_num_groups: int = 32, - cross_attention_dim: Optional[int] = None, - attention_bias: bool = False, - sample_size: Optional[int] = None, - num_vector_embeds: Optional[int] = None, - patch_size: Optional[int] = 2, - activation_fn: str = "geglu", - num_embeds_ada_norm: Optional[int] = None, - use_linear_projection: bool = False, - only_cross_attention: bool = False, - upcast_attention: bool = False, - norm_type: str = "layer_norm", - block_type: str = "unidiffuser", - pre_layer_norm: bool = False, - norm_elementwise_affine: bool = True, - use_patch_pos_embed=False, - ff_final_dropout: bool = False, - ): - super().__init__() - self.use_linear_projection = use_linear_projection - self.num_attention_heads = num_attention_heads - self.attention_head_dim = attention_head_dim - inner_dim = num_attention_heads * attention_head_dim - - # 1. Input - # Only support patch input of shape (batch_size, num_channels, height, width) for now - assert in_channels is not None and patch_size is not None, "Patch input requires in_channels and patch_size." - - assert sample_size is not None, "UTransformer2DModel over patched input must provide sample_size" - - # 2. Define input layers - self.height = sample_size - self.width = sample_size - - self.patch_size = patch_size - self.pos_embed = PatchEmbed( - height=sample_size, - width=sample_size, - patch_size=patch_size, - in_channels=in_channels, - embed_dim=inner_dim, - use_pos_embed=use_patch_pos_embed, - ) - - # 3. Define transformers blocks - # Modify this to have in_blocks ("downsample" blocks, even though we don't actually downsample), a mid_block, - # and out_blocks ("upsample" blocks). Like a U-Net, there are skip connections from in_blocks to out_blocks in - # a "U"-shaped fashion (e.g. first in_block to last out_block, etc.). - # Quick hack to make the transformer block type configurable - if block_type == "unidiffuser": - block_cls = UniDiffuserBlock - else: - block_cls = UTransformerBlock - self.transformer_in_blocks = nn.ModuleList( - [ - block_cls( - inner_dim, - num_attention_heads, - attention_head_dim, - dropout=dropout, - cross_attention_dim=cross_attention_dim, - activation_fn=activation_fn, - num_embeds_ada_norm=num_embeds_ada_norm, - attention_bias=attention_bias, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - norm_type=norm_type, - pre_layer_norm=pre_layer_norm, - norm_elementwise_affine=norm_elementwise_affine, - final_dropout=ff_final_dropout, - ) - for d in range(num_layers // 2) - ] - ) - - self.transformer_mid_block = block_cls( - inner_dim, - num_attention_heads, - attention_head_dim, - dropout=dropout, - cross_attention_dim=cross_attention_dim, - activation_fn=activation_fn, - num_embeds_ada_norm=num_embeds_ada_norm, - attention_bias=attention_bias, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - norm_type=norm_type, - pre_layer_norm=pre_layer_norm, - norm_elementwise_affine=norm_elementwise_affine, - final_dropout=ff_final_dropout, - ) - - # For each skip connection, we use a SkipBlock (concatenation + Linear + LayerNorm) to process the inputs - # before each transformer out_block. 
- self.transformer_out_blocks = nn.ModuleList( - [ - nn.ModuleDict( - { - "skip": SkipBlock( - inner_dim, - ), - "block": block_cls( - inner_dim, - num_attention_heads, - attention_head_dim, - dropout=dropout, - cross_attention_dim=cross_attention_dim, - activation_fn=activation_fn, - num_embeds_ada_norm=num_embeds_ada_norm, - attention_bias=attention_bias, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - norm_type=norm_type, - pre_layer_norm=pre_layer_norm, - norm_elementwise_affine=norm_elementwise_affine, - final_dropout=ff_final_dropout, - ), - } - ) - for d in range(num_layers // 2) - ] - ) - - # 4. Define output layers - self.out_channels = in_channels if out_channels is None else out_channels - - # Following the UniDiffuser U-ViT implementation, we process the transformer output with - # a LayerNorm layer with per-element affine params - self.norm_out = nn.LayerNorm(inner_dim) - - def forward( - self, - hidden_states, - encoder_hidden_states=None, - timestep=None, - class_labels=None, - cross_attention_kwargs=None, - return_dict: bool = True, - hidden_states_is_embedding: bool = False, - unpatchify: bool = True, - ): - """ - Args: - hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. - When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input - hidden_states - encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): - Conditional embeddings for cross attention layer. If not given, cross-attention defaults to - self-attention. - timestep ( `torch.long`, *optional*): - Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step. - class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): - Optional class labels to be applied as an embedding in AdaLayerZeroNorm. Used to indicate class labels - conditioning. - cross_attention_kwargs (*optional*): - Keyword arguments to supply to the cross attention layers, if used. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. - hidden_states_is_embedding (`bool`, *optional*, defaults to `False`): - Whether or not hidden_states is an embedding directly usable by the transformer. In this case we will - ignore input handling (e.g. continuous, vectorized, etc.) and directly feed hidden_states into the - transformer blocks. - unpatchify (`bool`, *optional*, defaults to `True`): - Whether to unpatchify the transformer output. - - Returns: - [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`: - [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - """ - # 0. Check inputs - - if not unpatchify and return_dict: - raise ValueError( - f"Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when" - f" `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim)" - " rather than (batch_size, num_channels, height, width)." - ) - - # 1. Input - if not hidden_states_is_embedding: - hidden_states = self.pos_embed(hidden_states) - - # 2. 
Blocks - - # In ("downsample") blocks - skips = [] - for in_block in self.transformer_in_blocks: - hidden_states = in_block( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - timestep=timestep, - cross_attention_kwargs=cross_attention_kwargs, - class_labels=class_labels, - ) - skips.append(hidden_states) - - # Mid block - hidden_states = self.transformer_mid_block(hidden_states) - - # Out ("upsample") blocks - for out_block in self.transformer_out_blocks: - hidden_states = out_block["skip"](hidden_states, skips.pop()) - hidden_states = out_block["block"]( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - timestep=timestep, - cross_attention_kwargs=cross_attention_kwargs, - class_labels=class_labels, - ) - - # 3. Output - # Don't support AdaLayerNorm for now, so no conditioning/scale/shift logic - hidden_states = self.norm_out(hidden_states) - # hidden_states = self.proj_out(hidden_states) - - if unpatchify: - # unpatchify - height = width = int(hidden_states.shape[1] ** 0.5) - hidden_states = hidden_states.reshape( - shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) - ) - hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) - output = hidden_states.reshape( - shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) - ) - else: - output = hidden_states - - if not return_dict: - return (output,) - - return Transformer2DModelOutput(sample=output) - - -class UniDiffuserModel(ModelMixin, ConfigMixin): - """ - Transformer model for a image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is a - modification of [`UTransformer2DModel`] with input and output heads for the VAE-embedded latent image, the - CLIP-embedded image, and the CLIP-embedded prompt (see paper for more details). - - Parameters: - text_dim (`int`): The hidden dimension of the CLIP text model used to embed images. - clip_img_dim (`int`): The hidden dimension of the CLIP vision model used to embed prompts. - num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. - attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. - in_channels (`int`, *optional*): - Pass if the input is continuous. The number of channels in the input. - out_channels (`int`, *optional*): - The number of output channels; if `None`, defaults to `in_channels`. - num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. - norm_num_groups (`int`, *optional*, defaults to `32`): - The number of groups to use when performing Group Normalization. - cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. - attention_bias (`bool`, *optional*): - Configure if the TransformerBlocks' attention should contain a bias parameter. - sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. - Note that this is fixed at training time as it is used for learning a number of position embeddings. See - `ImagePositionalEmbeddings`. - num_vector_embeds (`int`, *optional*): - Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. - Includes the class for the masked latent pixel. - patch_size (`int`, *optional*, defaults to 2): - The patch size to use in the patch embedding. 
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. - num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. - The number of diffusion steps used during training. Note that this is fixed at training time as it is used - to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for - up to but not more than steps than `num_embeds_ada_norm`. - use_linear_projection (int, *optional*): TODO: Not used - only_cross_attention (`bool`, *optional*): - Whether to use only cross-attention layers. In this case two cross attention layers are used in each - transformer block. - upcast_attention (`bool`, *optional*): - Whether to upcast the query and key to float32 when performing the attention calculation. - norm_type (`str`, *optional*, defaults to `"layer_norm"`): - The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`. - block_type (`str`, *optional*, defaults to `"unidiffuser"`): - The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual - backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard - behavior in `diffusers`.) - pre_layer_norm (`bool`, *optional*): - Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"), - as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm - (`pre_layer_norm = False`). - norm_elementwise_affine (`bool`, *optional*): - Whether to use learnable per-element affine parameters during layer normalization. - use_patch_pos_embed (`bool`, *optional*): - Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`). - ff_final_dropout (`bool`, *optional*): - Whether to use a final Dropout layer after the feedforward network. - use_data_type_embedding (`bool`, *optional*): - Whether to use a data type embedding. This is only relevant for UniDiffuser-v1 style models; UniDiffuser-v1 - is continue-trained from UniDiffuser-v0 on non-publically-available data and accepts a `data_type` - argument, which can either be `1` to use the weights trained on non-publically-available data or `0` - otherwise. This argument is subsequently embedded by the data type embedding, if used. - """ - - @register_to_config - def __init__( - self, - text_dim: int = 768, - clip_img_dim: int = 512, - num_text_tokens: int = 77, - num_attention_heads: int = 16, - attention_head_dim: int = 88, - in_channels: Optional[int] = None, - out_channels: Optional[int] = None, - num_layers: int = 1, - dropout: float = 0.0, - norm_num_groups: int = 32, - cross_attention_dim: Optional[int] = None, - attention_bias: bool = False, - sample_size: Optional[int] = None, - num_vector_embeds: Optional[int] = None, - patch_size: Optional[int] = None, - activation_fn: str = "geglu", - num_embeds_ada_norm: Optional[int] = None, - use_linear_projection: bool = False, - only_cross_attention: bool = False, - upcast_attention: bool = False, - norm_type: str = "layer_norm", - block_type: str = "unidiffuser", - pre_layer_norm: bool = False, - use_timestep_embedding=False, - norm_elementwise_affine: bool = True, - use_patch_pos_embed=False, - ff_final_dropout: bool = True, - use_data_type_embedding: bool = False, - ): - super().__init__() - - # 0. 
Handle dimensions - self.inner_dim = num_attention_heads * attention_head_dim - - assert sample_size is not None, "UniDiffuserModel over patched input must provide sample_size" - self.sample_size = sample_size - self.in_channels = in_channels - self.out_channels = in_channels if out_channels is None else out_channels - - self.patch_size = patch_size - # Assume image is square... - self.num_patches = (self.sample_size // patch_size) * (self.sample_size // patch_size) - - # 1. Define input layers - # 1.1 Input layers for text and image input - # For now, only support patch input for VAE latent image input - self.vae_img_in = PatchEmbed( - height=sample_size, - width=sample_size, - patch_size=patch_size, - in_channels=in_channels, - embed_dim=self.inner_dim, - use_pos_embed=use_patch_pos_embed, - ) - self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim) - self.text_in = nn.Linear(text_dim, self.inner_dim) - - # 1.2. Timestep embeddings for t_img, t_text - self.timestep_img_proj = Timesteps( - self.inner_dim, - flip_sin_to_cos=True, - downscale_freq_shift=0, - ) - self.timestep_img_embed = ( - TimestepEmbedding( - self.inner_dim, - 4 * self.inner_dim, - out_dim=self.inner_dim, - ) - if use_timestep_embedding - else nn.Identity() - ) - - self.timestep_text_proj = Timesteps( - self.inner_dim, - flip_sin_to_cos=True, - downscale_freq_shift=0, - ) - self.timestep_text_embed = ( - TimestepEmbedding( - self.inner_dim, - 4 * self.inner_dim, - out_dim=self.inner_dim, - ) - if use_timestep_embedding - else nn.Identity() - ) - - # 1.3. Positional embedding - self.num_text_tokens = num_text_tokens - self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches - self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim)) - self.pos_embed_drop = nn.Dropout(p=dropout) - trunc_normal_(self.pos_embed, std=0.02) - - # 1.4. Handle data type token embeddings for UniDiffuser-V1, if necessary - self.use_data_type_embedding = use_data_type_embedding - if self.use_data_type_embedding: - self.data_type_token_embedding = nn.Embedding(2, self.inner_dim) - self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim)) - - # 2. Define transformer blocks - self.transformer = UTransformer2DModel( - num_attention_heads=num_attention_heads, - attention_head_dim=attention_head_dim, - in_channels=in_channels, - out_channels=out_channels, - num_layers=num_layers, - dropout=dropout, - norm_num_groups=norm_num_groups, - cross_attention_dim=cross_attention_dim, - attention_bias=attention_bias, - sample_size=sample_size, - num_vector_embeds=num_vector_embeds, - patch_size=patch_size, - activation_fn=activation_fn, - num_embeds_ada_norm=num_embeds_ada_norm, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - norm_type=norm_type, - block_type=block_type, - pre_layer_norm=pre_layer_norm, - norm_elementwise_affine=norm_elementwise_affine, - use_patch_pos_embed=use_patch_pos_embed, - ff_final_dropout=ff_final_dropout, - ) - - # 3. 
Define output layers - patch_dim = (patch_size**2) * out_channels - self.vae_img_out = nn.Linear(self.inner_dim, patch_dim) - self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim) - self.text_out = nn.Linear(self.inner_dim, text_dim) - - @torch.jit.ignore - def no_weight_decay(self): - return {"pos_embed"} - - def forward( - self, - latent_image_embeds: torch.FloatTensor, - image_embeds: torch.FloatTensor, - prompt_embeds: torch.FloatTensor, - timestep_img: Union[torch.Tensor, float, int], - timestep_text: Union[torch.Tensor, float, int], - data_type: Optional[Union[torch.Tensor, float, int]] = 1, - encoder_hidden_states=None, - cross_attention_kwargs=None, - ): - """ - Args: - latent_image_embeds (`torch.FloatTensor` of shape `(batch size, latent channels, height, width)`): - Latent image representation from the VAE encoder. - image_embeds (`torch.FloatTensor` of shape `(batch size, 1, clip_img_dim)`): - CLIP-embedded image representation (unsqueezed in the first dimension). - prompt_embeds (`torch.FloatTensor` of shape `(batch size, seq_len, text_dim)`): - CLIP-embedded text representation. - timestep_img (`torch.long` or `float` or `int`): - Current denoising step for the image. - timestep_text (`torch.long` or `float` or `int`): - Current denoising step for the text. - data_type: (`torch.int` or `float` or `int`, *optional*, defaults to `1`): - Only used in UniDiffuser-v1-style models. Can be either `1`, to use weights trained on nonpublic data, - or `0` otherwise. - encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): - Conditional embeddings for cross attention layer. If not given, cross-attention defaults to - self-attention. - cross_attention_kwargs (*optional*): - Keyword arguments to supply to the cross attention layers, if used. - - - Returns: - `tuple`: Returns relevant parts of the model's noise prediction: the first element of the tuple is tbe VAE - image embedding, the second element is the CLIP image embedding, and the third element is the CLIP text - embedding. - """ - batch_size = latent_image_embeds.shape[0] - - # 1. Input - # 1.1. Map inputs to shape (B, N, inner_dim) - vae_hidden_states = self.vae_img_in(latent_image_embeds) - clip_hidden_states = self.clip_img_in(image_embeds) - text_hidden_states = self.text_in(prompt_embeds) - - num_text_tokens, num_img_tokens = text_hidden_states.size(1), vae_hidden_states.size(1) - - # 1.2. Encode image timesteps to single token (B, 1, inner_dim) - if not torch.is_tensor(timestep_img): - timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device) - - timestep_img_token = self.timestep_img_proj(timestep_img) - # t_img_token does not contain any weights and will always return f32 tensors - # but time_embedding might be fp16, so we need to cast here. - timestep_img_token = timestep_img_token.to(dtype=self.dtype) - timestep_img_token = self.timestep_img_embed(timestep_img_token) - timestep_img_token = timestep_img_token.unsqueeze(dim=1) - - # 1.3. 
Encode text timesteps to single token (B, 1, inner_dim) - if not torch.is_tensor(timestep_text): - timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device) - - timestep_text_token = self.timestep_text_proj(timestep_text) - # t_text_token does not contain any weights and will always return f32 tensors - # but time_embedding might be fp16, so we need to cast here. - timestep_text_token = timestep_text_token.to(dtype=self.dtype) - timestep_text_token = self.timestep_text_embed(timestep_text_token) - timestep_text_token = timestep_text_token.unsqueeze(dim=1) - - # 1.4. Concatenate all of the embeddings together. - if self.use_data_type_embedding: - assert data_type is not None, "data_type must be supplied if the model uses a data type embedding" - if not torch.is_tensor(data_type): - data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device) - - data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1) - hidden_states = torch.cat( - [ - timestep_img_token, - timestep_text_token, - data_type_token, - text_hidden_states, - clip_hidden_states, - vae_hidden_states, - ], - dim=1, - ) - else: - hidden_states = torch.cat( - [timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states], - dim=1, - ) - - # 1.5. Prepare the positional embeddings and add to hidden states - # Note: I think img_vae should always have the proper shape, so there's no need to interpolate - # the position embeddings. - if self.use_data_type_embedding: - pos_embed = torch.cat( - [self.pos_embed[:, : 1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1 :, :]], dim=1 - ) - else: - pos_embed = self.pos_embed - hidden_states = hidden_states + pos_embed - hidden_states = self.pos_embed_drop(hidden_states) - - # 2. Blocks - hidden_states = self.transformer( - hidden_states, - encoder_hidden_states=encoder_hidden_states, - timestep=None, - class_labels=None, - cross_attention_kwargs=cross_attention_kwargs, - return_dict=False, - hidden_states_is_embedding=True, - unpatchify=False, - )[0] - - # 3. Output - # Split out the predicted noise representation. 
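# Illustrative aside: a minimal, self-contained sketch (not from the deleted file above) of
# what the next few lines do. The output sequence is split back into its components in the
# same order they were concatenated earlier (t_img, t_text, text tokens, CLIP image token,
# VAE patch tokens), and the VAE patch tokens are then "unpatchified" into an image-shaped
# tensor. All sizes below are made-up example values.
import torch

batch, inner_dim = 2, 1536
num_text_tokens, patch_size, out_channels = 77, 2, 4
height = width = 8                                   # patches per side
num_img_tokens = height * width
seq = torch.randn(batch, 1 + 1 + num_text_tokens + 1 + num_img_tokens, inner_dim)

# Split in concatenation order.
t_img_tok, t_text_tok, text_tok, clip_tok, vae_tok = seq.split(
    (1, 1, num_text_tokens, 1, num_img_tokens), dim=1
)

# Project each patch token to patch_size**2 * out_channels values, then fold back to NCHW.
vae_out = torch.nn.Linear(inner_dim, patch_size**2 * out_channels)(vae_tok)
vae_out = vae_out.reshape(-1, height, width, patch_size, patch_size, out_channels)
vae_out = torch.einsum("nhwpqc->nchpwq", vae_out)
vae_out = vae_out.reshape(-1, out_channels, height * patch_size, width * patch_size)
assert vae_out.shape == (batch, out_channels, height * patch_size, width * patch_size)
# End of aside.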
- if self.use_data_type_embedding: - ( - t_img_token_out, - t_text_token_out, - data_type_token_out, - text_out, - img_clip_out, - img_vae_out, - ) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1) - else: - t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out = hidden_states.split( - (1, 1, num_text_tokens, 1, num_img_tokens), dim=1 - ) - - img_vae_out = self.vae_img_out(img_vae_out) - - # unpatchify - height = width = int(img_vae_out.shape[1] ** 0.5) - img_vae_out = img_vae_out.reshape( - shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) - ) - img_vae_out = torch.einsum("nhwpqc->nchpwq", img_vae_out) - img_vae_out = img_vae_out.reshape( - shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) - ) - - img_clip_out = self.clip_img_out(img_clip_out) - - text_out = self.text_out(text_out) - - return img_vae_out, img_clip_out, text_out diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/get_modified_files.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/get_modified_files.py deleted file mode 100644 index 650c61ccb21eff8407147563b103733b472546cd..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/get_modified_files.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: -# python ./utils/get_modified_files.py utils src tests examples -# -# it uses git to find the forking point and which files were modified - i.e. 
files not under git won't be considered -# since the output of this script is fed into Makefile commands it doesn't print a newline after the results - -import re -import subprocess -import sys - - -fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") -modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split() - -joined_dirs = "|".join(sys.argv[1:]) -regex = re.compile(rf"^({joined_dirs}).*?\.py$") - -relevant_modified_files = [x for x in modified_files if regex.match(x)] -print(" ".join(relevant_modified_files), end="") diff --git a/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py deleted file mode 100644 index f5fee7e13cdfd531bf24d7c261e843855124f762..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py' -model = dict( - backbone=dict( - dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False), - stage_with_dcn=(False, True, True, True))) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py deleted file mode 100644 index ad7b0346a63dfa3c3ca246b624155fc4fd331a3f..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py +++ /dev/null @@ -1,12 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict( - _delete_=True, - type='ModulatedDeformRoIPoolPack', - output_size=7, - output_channels=256), - out_channels=256, - featmap_strides=[4, 8, 16, 32]))) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py deleted file mode 100644 index 585cc2c332fd88a9f0164b14084d45d7a3783b11..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py' -# learning policy -lr_config = dict(step=[16, 22]) -runner = dict(type='EpochBasedRunner', max_epochs=24) diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolo_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolo_head.py deleted file mode 100644 index 25a005d36903333f37a6c6d31b4d613c071f4a07..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolo_head.py +++ /dev/null @@ -1,577 +0,0 @@ -# Copyright (c) 2019 Western Digital Corporation or its affiliates. 
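# Illustrative aside: a small sketch (not from the repository above) restating what the
# get_modified_files.py script above does, as a reusable function. The directory names in
# the usage comment are hypothetical.
import re
import subprocess

def modified_python_files(top_level_dirs):
    # Fork point with main, then every file touched since that commit.
    fork_point = subprocess.check_output(
        ["git", "merge-base", "main", "HEAD"]).decode("utf-8").strip()
    changed = subprocess.check_output(
        ["git", "diff", "--name-only", fork_point]).decode("utf-8").split()
    # Keep only .py files under the requested top-level directories.
    pattern = re.compile(rf"^({'|'.join(top_level_dirs)}).*\.py$")
    return [path for path in changed if pattern.match(path)]

# Example: modified_python_files(["src", "tests", "examples"])
# End of aside.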
- -import warnings - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, normal_init -from mmcv.runner import force_fp32 - -from mmdet.core import (build_anchor_generator, build_assigner, - build_bbox_coder, build_sampler, images_to_levels, - multi_apply, multiclass_nms) -from ..builder import HEADS, build_loss -from .base_dense_head import BaseDenseHead -from .dense_test_mixins import BBoxTestMixin - - -@HEADS.register_module() -class YOLOV3Head(BaseDenseHead, BBoxTestMixin): - """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767. - - Args: - num_classes (int): The number of object classes (w/o background) - in_channels (List[int]): Number of input channels per scale. - out_channels (List[int]): The number of output channels per scale - before the final 1x1 layer. Default: (1024, 512, 256). - anchor_generator (dict): Config dict for anchor generator - bbox_coder (dict): Config of bounding box coder. - featmap_strides (List[int]): The stride of each scale. - Should be in descending order. Default: (32, 16, 8). - one_hot_smoother (float): Set a non-zero value to enable label-smooth - Default: 0. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Dictionary to construct and config norm layer. - Default: dict(type='BN', requires_grad=True) - act_cfg (dict): Config dict for activation layer. - Default: dict(type='LeakyReLU', negative_slope=0.1). - loss_cls (dict): Config of classification loss. - loss_conf (dict): Config of confidence loss. - loss_xy (dict): Config of xy coordinate loss. - loss_wh (dict): Config of wh coordinate loss. - train_cfg (dict): Training config of YOLOV3 head. Default: None. - test_cfg (dict): Testing config of YOLOV3 head. Default: None. 
- """ - - def __init__(self, - num_classes, - in_channels, - out_channels=(1024, 512, 256), - anchor_generator=dict( - type='YOLOAnchorGenerator', - base_sizes=[[(116, 90), (156, 198), (373, 326)], - [(30, 61), (62, 45), (59, 119)], - [(10, 13), (16, 30), (33, 23)]], - strides=[32, 16, 8]), - bbox_coder=dict(type='YOLOBBoxCoder'), - featmap_strides=[32, 16, 8], - one_hot_smoother=0., - conv_cfg=None, - norm_cfg=dict(type='BN', requires_grad=True), - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - loss_cls=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_conf=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_xy=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - loss_wh=dict(type='MSELoss', loss_weight=1.0), - train_cfg=None, - test_cfg=None): - super(YOLOV3Head, self).__init__() - # Check params - assert (len(in_channels) == len(out_channels) == len(featmap_strides)) - - self.num_classes = num_classes - self.in_channels = in_channels - self.out_channels = out_channels - self.featmap_strides = featmap_strides - self.train_cfg = train_cfg - self.test_cfg = test_cfg - if self.train_cfg: - self.assigner = build_assigner(self.train_cfg.assigner) - if hasattr(self.train_cfg, 'sampler'): - sampler_cfg = self.train_cfg.sampler - else: - sampler_cfg = dict(type='PseudoSampler') - self.sampler = build_sampler(sampler_cfg, context=self) - - self.one_hot_smoother = one_hot_smoother - - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - - self.bbox_coder = build_bbox_coder(bbox_coder) - self.anchor_generator = build_anchor_generator(anchor_generator) - - self.loss_cls = build_loss(loss_cls) - self.loss_conf = build_loss(loss_conf) - self.loss_xy = build_loss(loss_xy) - self.loss_wh = build_loss(loss_wh) - # usually the numbers of anchors for each level are the same - # except SSD detectors - self.num_anchors = self.anchor_generator.num_base_anchors[0] - assert len( - self.anchor_generator.num_base_anchors) == len(featmap_strides) - self._init_layers() - - @property - def num_levels(self): - return len(self.featmap_strides) - - @property - def num_attrib(self): - """int: number of attributes in pred_map, bboxes (4) + - objectness (1) + num_classes""" - - return 5 + self.num_classes - - def _init_layers(self): - self.convs_bridge = nn.ModuleList() - self.convs_pred = nn.ModuleList() - for i in range(self.num_levels): - conv_bridge = ConvModule( - self.in_channels[i], - self.out_channels[i], - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - conv_pred = nn.Conv2d(self.out_channels[i], - self.num_anchors * self.num_attrib, 1) - - self.convs_bridge.append(conv_bridge) - self.convs_pred.append(conv_pred) - - def init_weights(self): - """Initialize weights of the head.""" - for m in self.convs_pred: - normal_init(m, std=0.01) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple[Tensor]: A tuple of multi-level predication map, each is a - 4D-tensor of shape (batch_size, 5+num_classes, height, width). 
- """ - - assert len(feats) == self.num_levels - pred_maps = [] - for i in range(self.num_levels): - x = feats[i] - x = self.convs_bridge[i](x) - pred_map = self.convs_pred[i](x) - pred_maps.append(pred_map) - - return tuple(pred_maps), - - @force_fp32(apply_to=('pred_maps', )) - def get_bboxes(self, - pred_maps, - img_metas, - cfg=None, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - pred_maps (list[Tensor]): Raw predictions for a batch of images. - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used. Default: None. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. - """ - num_levels = len(pred_maps) - pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)] - scale_factors = [ - img_metas[i]['scale_factor'] - for i in range(pred_maps_list[0].shape[0]) - ] - result_list = self._get_bboxes(pred_maps_list, scale_factors, cfg, - rescale, with_nms) - return result_list - - def _get_bboxes(self, - pred_maps_list, - scale_factors, - cfg, - rescale=False, - with_nms=True): - """Transform outputs for a single batch item into bbox predictions. - - Args: - pred_maps_list (list[Tensor]): Prediction maps for different scales - of each single image in the batch. - scale_factors (list(ndarray)): Scale factor of the image arrange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. 
- """ - cfg = self.test_cfg if cfg is None else cfg - assert len(pred_maps_list) == self.num_levels - - device = pred_maps_list[0].device - batch_size = pred_maps_list[0].shape[0] - - featmap_sizes = [ - pred_maps_list[i].shape[-2:] for i in range(self.num_levels) - ] - multi_lvl_anchors = self.anchor_generator.grid_anchors( - featmap_sizes, device) - # convert to tensor to keep tracing - nms_pre_tensor = torch.tensor( - cfg.get('nms_pre', -1), device=device, dtype=torch.long) - - multi_lvl_bboxes = [] - multi_lvl_cls_scores = [] - multi_lvl_conf_scores = [] - for i in range(self.num_levels): - # get some key info for current scale - pred_map = pred_maps_list[i] - stride = self.featmap_strides[i] - # (b,h, w, num_anchors*num_attrib) -> - # (b,h*w*num_anchors, num_attrib) - pred_map = pred_map.permute(0, 2, 3, - 1).reshape(batch_size, -1, - self.num_attrib) - # Inplace operation like - # ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])``` - # would create constant tensor when exporting to onnx - pred_map_conf = torch.sigmoid(pred_map[..., :2]) - pred_map_rest = pred_map[..., 2:] - pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1) - pred_map_boxes = pred_map[..., :4] - multi_lvl_anchor = multi_lvl_anchors[i] - multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes) - bbox_pred = self.bbox_coder.decode(multi_lvl_anchor, - pred_map_boxes, stride) - # conf and cls - conf_pred = torch.sigmoid(pred_map[..., 4]) - cls_pred = torch.sigmoid(pred_map[..., 5:]).view( - batch_size, -1, self.num_classes) # Cls pred one-hot. - - # Get top-k prediction - # Always keep topk op for dynamic input in onnx - if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export() - or conf_pred.shape[1] > nms_pre_tensor): - from torch import _shape_as_tensor - # keep shape as tensor and get k - num_anchor = _shape_as_tensor(conf_pred)[1].to(device) - nms_pre = torch.where(nms_pre_tensor < num_anchor, - nms_pre_tensor, num_anchor) - _, topk_inds = conf_pred.topk(nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds).long() - bbox_pred = bbox_pred[batch_inds, topk_inds, :] - cls_pred = cls_pred[batch_inds, topk_inds, :] - conf_pred = conf_pred[batch_inds, topk_inds] - - # Save the result of current scale - multi_lvl_bboxes.append(bbox_pred) - multi_lvl_cls_scores.append(cls_pred) - multi_lvl_conf_scores.append(conf_pred) - - # Merge the results of different scales together - batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1) - batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1) - batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1) - - # Set max number of box to be feed into nms in deployment - deploy_nms_pre = cfg.get('deploy_nms_pre', -1) - if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export(): - _, topk_inds = batch_mlvl_conf_scores.topk(deploy_nms_pre) - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds).long() - batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :] - batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :] - batch_mlvl_conf_scores = batch_mlvl_conf_scores[batch_inds, - topk_inds] - - if with_nms and (batch_mlvl_conf_scores.size(0) == 0): - return torch.zeros((0, 5)), torch.zeros((0, )) - - if rescale: - batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor( - scale_factors).unsqueeze(1) - - # In mmdet 2.x, the class_id for background is num_classes. - # i.e., the last column. 
- padding = batch_mlvl_scores.new_zeros(batch_size, - batch_mlvl_scores.shape[1], 1) - batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1) - - # Support exporting to onnx without nms - if with_nms and cfg.get('nms', None) is not None: - det_results = [] - for (mlvl_bboxes, mlvl_scores, - mlvl_conf_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores, - batch_mlvl_conf_scores): - # Filtering out all predictions with conf < conf_thr - conf_thr = cfg.get('conf_thr', -1) - if conf_thr > 0 and (not torch.onnx.is_in_onnx_export()): - # TensorRT not support NonZero - # add as_tuple=False for compatibility in Pytorch 1.6 - # flatten would create a Reshape op with constant values, - # and raise RuntimeError when doing inference in ONNX - # Runtime with a different input image (#4221). - conf_inds = mlvl_conf_scores.ge(conf_thr).nonzero( - as_tuple=False).squeeze(1) - mlvl_bboxes = mlvl_bboxes[conf_inds, :] - mlvl_scores = mlvl_scores[conf_inds, :] - mlvl_conf_scores = mlvl_conf_scores[conf_inds] - - det_bboxes, det_labels = multiclass_nms( - mlvl_bboxes, - mlvl_scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=mlvl_conf_scores) - det_results.append(tuple([det_bboxes, det_labels])) - - else: - det_results = [ - tuple(mlvl_bs) - for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores, - batch_mlvl_conf_scores) - ] - return det_results - - @force_fp32(apply_to=('pred_maps', )) - def loss(self, - pred_maps, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. - - Args: - pred_maps (list[Tensor]): Prediction map for each scale level, - shape (N, num_anchors * num_attrib, H, W) - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - num_imgs = len(img_metas) - device = pred_maps[0][0].device - - featmap_sizes = [ - pred_maps[i].shape[-2:] for i in range(self.num_levels) - ] - multi_level_anchors = self.anchor_generator.grid_anchors( - featmap_sizes, device) - anchor_list = [multi_level_anchors for _ in range(num_imgs)] - - responsible_flag_list = [] - for img_id in range(len(img_metas)): - responsible_flag_list.append( - self.anchor_generator.responsible_flags( - featmap_sizes, gt_bboxes[img_id], device)) - - target_maps_list, neg_maps_list = self.get_targets( - anchor_list, responsible_flag_list, gt_bboxes, gt_labels) - - losses_cls, losses_conf, losses_xy, losses_wh = multi_apply( - self.loss_single, pred_maps, target_maps_list, neg_maps_list) - - return dict( - loss_cls=losses_cls, - loss_conf=losses_conf, - loss_xy=losses_xy, - loss_wh=losses_wh) - - def loss_single(self, pred_map, target_map, neg_map): - """Compute loss of a single image from a batch. - - Args: - pred_map (Tensor): Raw predictions for a single level. - target_map (Tensor): The Ground-Truth target for a single level. - neg_map (Tensor): The negative masks for a single level. - - Returns: - tuple: - loss_cls (Tensor): Classification loss. - loss_conf (Tensor): Confidence loss. - loss_xy (Tensor): Regression loss of x, y coordinate. - loss_wh (Tensor): Regression loss of w, h coordinate. 
- """ - - num_imgs = len(pred_map) - pred_map = pred_map.permute(0, 2, 3, - 1).reshape(num_imgs, -1, self.num_attrib) - neg_mask = neg_map.float() - pos_mask = target_map[..., 4] - pos_and_neg_mask = neg_mask + pos_mask - pos_mask = pos_mask.unsqueeze(dim=-1) - if torch.max(pos_and_neg_mask) > 1.: - warnings.warn('There is overlap between pos and neg sample.') - pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.) - - pred_xy = pred_map[..., :2] - pred_wh = pred_map[..., 2:4] - pred_conf = pred_map[..., 4] - pred_label = pred_map[..., 5:] - - target_xy = target_map[..., :2] - target_wh = target_map[..., 2:4] - target_conf = target_map[..., 4] - target_label = target_map[..., 5:] - - loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask) - loss_conf = self.loss_conf( - pred_conf, target_conf, weight=pos_and_neg_mask) - loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask) - loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask) - - return loss_cls, loss_conf, loss_xy, loss_wh - - def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list, - gt_labels_list): - """Compute target maps for anchors in multiple images. - - Args: - anchor_list (list[list[Tensor]]): Multi level anchors of each - image. The outer list indicates images, and the inner list - corresponds to feature levels of the image. Each element of - the inner list is a tensor of shape (num_total_anchors, 4). - responsible_flag_list (list[list[Tensor]]): Multi level responsible - flags of each image. Each element is a tensor of shape - (num_total_anchors, ) - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image. - gt_labels_list (list[Tensor]): Ground truth labels of each box. - - Returns: - tuple: Usually returns a tuple containing learning targets. - - target_map_list (list[Tensor]): Target map of each level. - - neg_map_list (list[Tensor]): Negative map of each level. - """ - num_imgs = len(anchor_list) - - # anchor number of multi levels - num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]] - - results = multi_apply(self._get_targets_single, anchor_list, - responsible_flag_list, gt_bboxes_list, - gt_labels_list) - - all_target_maps, all_neg_maps = results - assert num_imgs == len(all_target_maps) == len(all_neg_maps) - target_maps_list = images_to_levels(all_target_maps, num_level_anchors) - neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors) - - return target_maps_list, neg_maps_list - - def _get_targets_single(self, anchors, responsible_flags, gt_bboxes, - gt_labels): - """Generate matching bounding box prior and converted GT. - - Args: - anchors (list[Tensor]): Multi-level anchors of the image. - responsible_flags (list[Tensor]): Multi-level responsible flags of - anchors - gt_bboxes (Tensor): Ground truth bboxes of single image. - gt_labels (Tensor): Ground truth labels of single image. 
- - Returns: - tuple: - target_map (Tensor): Predication target map of each - scale level, shape (num_total_anchors, - 5+num_classes) - neg_map (Tensor): Negative map of each scale level, - shape (num_total_anchors,) - """ - - anchor_strides = [] - for i in range(len(anchors)): - anchor_strides.append( - torch.tensor(self.featmap_strides[i], - device=gt_bboxes.device).repeat(len(anchors[i]))) - concat_anchors = torch.cat(anchors) - concat_responsible_flags = torch.cat(responsible_flags) - - anchor_strides = torch.cat(anchor_strides) - assert len(anchor_strides) == len(concat_anchors) == \ - len(concat_responsible_flags) - assign_result = self.assigner.assign(concat_anchors, - concat_responsible_flags, - gt_bboxes) - sampling_result = self.sampler.sample(assign_result, concat_anchors, - gt_bboxes) - - target_map = concat_anchors.new_zeros( - concat_anchors.size(0), self.num_attrib) - - target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes, - anchor_strides[sampling_result.pos_inds]) - - target_map[sampling_result.pos_inds, 4] = 1 - - gt_labels_one_hot = F.one_hot( - gt_labels, num_classes=self.num_classes).float() - if self.one_hot_smoother != 0: # label smooth - gt_labels_one_hot = gt_labels_one_hot * ( - 1 - self.one_hot_smoother - ) + self.one_hot_smoother / self.num_classes - target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[ - sampling_result.pos_assigned_gt_inds] - - neg_map = concat_anchors.new_zeros( - concat_anchors.size(0), dtype=torch.uint8) - neg_map[sampling_result.neg_inds] = 1 - - return target_map, neg_map - - def aug_test(self, feats, img_metas, rescale=False): - """Test function with test time augmentation. - - Args: - feats (list[Tensor]): the outer list indicates test-time - augmentations and inner Tensor should have a shape NxCxHxW, - which contains features for all images in the batch. - img_metas (list[list[dict]]): the outer list indicates test-time - augs (multiscale, flip, etc.) and the inner list indicates - images in a batch. each dict has image information. - rescale (bool, optional): Whether to rescale the results. - Defaults to False. 
- - Returns: - list[ndarray]: bbox results of each class - """ - return self.aug_test_bboxes(feats, img_metas, rescale=rescale) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py deleted file mode 100644 index a8fbd9beb11f3d1308ce2cd12da2a177c2d39478..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = [ - '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' -] -model = dict( - decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py deleted file mode 100644 index 0aada9d8dcd792fd4fc7da8908cc11d44a9ff521..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,39 +0,0 @@ -_base_ = './ocrnet_hr18_512x1024_40k_cityscapes.py' -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w48', - backbone=dict( - extra=dict( - stage2=dict(num_channels=(48, 96)), - stage3=dict(num_channels=(48, 96, 192)), - stage4=dict(num_channels=(48, 96, 192, 384)))), - decode_head=[ - dict( - type='FCNHead', - in_channels=[48, 96, 192, 384], - channels=sum([48, 96, 192, 384]), - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - kernel_size=1, - num_convs=1, - norm_cfg=norm_cfg, - concat_input=False, - dropout_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - dict( - type='OCRHead', - in_channels=[48, 96, 192, 384], - channels=512, - ocr_channels=256, - input_transform='resize_concat', - in_index=(0, 1, 2, 3), - norm_cfg=norm_cfg, - dropout_ratio=-1, - num_classes=19, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)) - ]) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py deleted file mode 100644 index f8865a7c4d795d9de3f5bc6b762b305b3cabc22f..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './psanet_r50-d8_512x512_80k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/inference.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/inference.py deleted file mode 100644 index 90bc1c0c68525734bd6793f07c15fe97d3c8342c..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/inference.py +++ /dev/null @@ -1,136 +0,0 @@ -import matplotlib.pyplot as plt -import annotator.uniformer.mmcv as mmcv -import torch -from annotator.uniformer.mmcv.parallel import collate, scatter -from annotator.uniformer.mmcv.runner import 
load_checkpoint - -from annotator.uniformer.mmseg.datasets.pipelines import Compose -from annotator.uniformer.mmseg.models import build_segmentor - - -def init_segmentor(config, checkpoint=None, device='cuda:0'): - """Initialize a segmentor from config file. - - Args: - config (str or :obj:`mmcv.Config`): Config file path or the config - object. - checkpoint (str, optional): Checkpoint path. If left as None, the model - will not load any weights. - device (str, optional) CPU/CUDA device option. Default 'cuda:0'. - Use 'cpu' for loading model on CPU. - Returns: - nn.Module: The constructed segmentor. - """ - if isinstance(config, str): - config = mmcv.Config.fromfile(config) - elif not isinstance(config, mmcv.Config): - raise TypeError('config must be a filename or Config object, ' - 'but got {}'.format(type(config))) - config.model.pretrained = None - config.model.train_cfg = None - model = build_segmentor(config.model, test_cfg=config.get('test_cfg')) - if checkpoint is not None: - checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') - model.CLASSES = checkpoint['meta']['CLASSES'] - model.PALETTE = checkpoint['meta']['PALETTE'] - model.cfg = config # save the config in the model for convenience - model.to(device) - model.eval() - return model - - -class LoadImage: - """A simple pipeline to load image.""" - - def __call__(self, results): - """Call function to load images into results. - - Args: - results (dict): A result dict contains the file name - of the image to be read. - - Returns: - dict: ``results`` will be returned containing loaded image. - """ - - if isinstance(results['img'], str): - results['filename'] = results['img'] - results['ori_filename'] = results['img'] - else: - results['filename'] = None - results['ori_filename'] = None - img = mmcv.imread(results['img']) - results['img'] = img - results['img_shape'] = img.shape - results['ori_shape'] = img.shape - return results - - -def inference_segmentor(model, img): - """Inference image(s) with the segmentor. - - Args: - model (nn.Module): The loaded segmentor. - imgs (str/ndarray or list[str/ndarray]): Either image files or loaded - images. - - Returns: - (list[Tensor]): The segmentation result. - """ - cfg = model.cfg - device = next(model.parameters()).device # model device - # build the data pipeline - test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] - test_pipeline = Compose(test_pipeline) - # prepare data - data = dict(img=img) - data = test_pipeline(data) - data = collate([data], samples_per_gpu=1) - if next(model.parameters()).is_cuda: - # scatter to specified GPU - data = scatter(data, [device])[0] - else: - data['img_metas'] = [i.data[0] for i in data['img_metas']] - - # forward the model - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - return result - - -def show_result_pyplot(model, - img, - result, - palette=None, - fig_size=(15, 10), - opacity=0.5, - title='', - block=True): - """Visualize the segmentation results on the image. - - Args: - model (nn.Module): The loaded segmentor. - img (str or np.ndarray): Image filename or loaded image. - result (list): The segmentation result. - palette (list[list[int]]] | None): The palette of segmentation - map. If None is given, random palette will be generated. - Default: None - fig_size (tuple): Figure size of the pyplot figure. - opacity(float): Opacity of painted segmentation map. - Default 0.5. - Must be in (0, 1] range. - title (str): The title of pyplot figure. - Default is ''. 
- block (bool): Whether to block the pyplot figure. - Default is True. - """ - if hasattr(model, 'module'): - model = model.module - img = model.show_result( - img, result, palette=palette, show=False, opacity=opacity) - # plt.figure(figsize=fig_size) - # plt.imshow(mmcv.bgr2rgb(img)) - # plt.title(title) - # plt.tight_layout() - # plt.show(block=block) - return mmcv.bgr2rgb(img) diff --git a/spaces/ArcAhmedEssam/CLIP-Interrogator-2/share_btn.py b/spaces/ArcAhmedEssam/CLIP-Interrogator-2/share_btn.py deleted file mode 100644 index 0f03b66de38b85ebd04515133ed354c362886b39..0000000000000000000000000000000000000000 --- a/spaces/ArcAhmedEssam/CLIP-Interrogator-2/share_btn.py +++ /dev/null @@ -1,70 +0,0 @@ -community_icon_html = """""" - -loading_icon_html = """""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - async function getInputImgFile(imgEl){ - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const isPng = imgEl.src.startsWith(`data:image/png`); - if(isPng){ - const fileName = `sd-perception-${{imgId}}.png`; - return new File([blob], fileName, { type: 'image/png' }); - }else{ - const fileName = `sd-perception-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - } - } - const gradioEl = document.querySelector('body > gradio-app'); - // const gradioEl = document.querySelector("gradio-app").shadowRoot; - const inputImgEl = gradioEl.querySelector('#input-img img'); - const outputTxt = gradioEl.querySelector('#output-txt textarea').value; - let titleTxt = outputTxt; - if(titleTxt.length > 100){ - titleTxt = titleTxt.slice(0, 100) + ' ...'; - } - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!outputTxt){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const inputFile = await getInputImgFile(inputImgEl); - const urlInputImg = await uploadFile(inputFile); - const descriptionMd = `#### Input img: - -#### Caption: -${outputTxt}`; - const params = new URLSearchParams({ - title: titleTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/wheel.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/wheel.py deleted file mode 100644 index a8cd1330f0f73ac76832bdbd6b455b10bd91ba83..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/wheel.py +++ /dev/null @@ -1,740 +0,0 @@ -"""Support for installing and building the "wheel" binary package format. 
-""" - -import collections -import compileall -import contextlib -import csv -import importlib -import logging -import os.path -import re -import shutil -import sys -import warnings -from base64 import urlsafe_b64encode -from email.message import Message -from itertools import chain, filterfalse, starmap -from typing import ( - IO, - TYPE_CHECKING, - Any, - BinaryIO, - Callable, - Dict, - Generator, - Iterable, - Iterator, - List, - NewType, - Optional, - Sequence, - Set, - Tuple, - Union, - cast, -) -from zipfile import ZipFile, ZipInfo - -from pip._vendor.distlib.scripts import ScriptMaker -from pip._vendor.distlib.util import get_export_entry -from pip._vendor.packaging.utils import canonicalize_name - -from pip._internal.exceptions import InstallationError -from pip._internal.locations import get_major_minor_version -from pip._internal.metadata import ( - BaseDistribution, - FilesystemWheel, - get_wheel_distribution, -) -from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl -from pip._internal.models.scheme import SCHEME_KEYS, Scheme -from pip._internal.utils.filesystem import adjacent_tmp_file, replace -from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file, partition -from pip._internal.utils.unpacking import ( - current_umask, - is_within_directory, - set_extracted_file_to_default_mode_plus_executable, - zip_item_is_executable, -) -from pip._internal.utils.wheel import parse_wheel - -if TYPE_CHECKING: - from typing import Protocol - - class File(Protocol): - src_record_path: "RecordPath" - dest_path: str - changed: bool - - def save(self) -> None: - pass - - -logger = logging.getLogger(__name__) - -RecordPath = NewType("RecordPath", str) -InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]] - - -def rehash(path: str, blocksize: int = 1 << 20) -> Tuple[str, str]: - """Return (encoded_digest, length) for path using hashlib.sha256()""" - h, length = hash_file(path, blocksize) - digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=") - return (digest, str(length)) - - -def csv_io_kwargs(mode: str) -> Dict[str, Any]: - """Return keyword arguments to properly open a CSV file - in the given mode. - """ - return {"mode": mode, "newline": "", "encoding": "utf-8"} - - -def fix_script(path: str) -> bool: - """Replace #!python with #!/path/to/python - Return True if file was changed. - """ - # XXX RECORD hashes will need to be updated - assert os.path.isfile(path) - - with open(path, "rb") as script: - firstline = script.readline() - if not firstline.startswith(b"#!python"): - return False - exename = sys.executable.encode(sys.getfilesystemencoding()) - firstline = b"#!" + exename + os.linesep.encode("ascii") - rest = script.read() - with open(path, "wb") as script: - script.write(firstline) - script.write(rest) - return True - - -def wheel_root_is_purelib(metadata: Message) -> bool: - return metadata.get("Root-Is-Purelib", "").lower() == "true" - - -def get_entrypoints(dist: BaseDistribution) -> Tuple[Dict[str, str], Dict[str, str]]: - console_scripts = {} - gui_scripts = {} - for entry_point in dist.iter_entry_points(): - if entry_point.group == "console_scripts": - console_scripts[entry_point.name] = entry_point.value - elif entry_point.group == "gui_scripts": - gui_scripts[entry_point.name] = entry_point.value - return console_scripts, gui_scripts - - -def message_about_scripts_not_on_PATH(scripts: Sequence[str]) -> Optional[str]: - """Determine if any scripts are not on PATH and format a warning. 
- Returns a warning message if one or more scripts are not on PATH, - otherwise None. - """ - if not scripts: - return None - - # Group scripts by the path they were installed in - grouped_by_dir: Dict[str, Set[str]] = collections.defaultdict(set) - for destfile in scripts: - parent_dir = os.path.dirname(destfile) - script_name = os.path.basename(destfile) - grouped_by_dir[parent_dir].add(script_name) - - # We don't want to warn for directories that are on PATH. - not_warn_dirs = [ - os.path.normcase(os.path.normpath(i)).rstrip(os.sep) - for i in os.environ.get("PATH", "").split(os.pathsep) - ] - # If an executable sits with sys.executable, we don't warn for it. - # This covers the case of venv invocations without activating the venv. - not_warn_dirs.append( - os.path.normcase(os.path.normpath(os.path.dirname(sys.executable))) - ) - warn_for: Dict[str, Set[str]] = { - parent_dir: scripts - for parent_dir, scripts in grouped_by_dir.items() - if os.path.normcase(os.path.normpath(parent_dir)) not in not_warn_dirs - } - if not warn_for: - return None - - # Format a message - msg_lines = [] - for parent_dir, dir_scripts in warn_for.items(): - sorted_scripts: List[str] = sorted(dir_scripts) - if len(sorted_scripts) == 1: - start_text = "script {} is".format(sorted_scripts[0]) - else: - start_text = "scripts {} are".format( - ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1] - ) - - msg_lines.append( - "The {} installed in '{}' which is not on PATH.".format( - start_text, parent_dir - ) - ) - - last_line_fmt = ( - "Consider adding {} to PATH or, if you prefer " - "to suppress this warning, use --no-warn-script-location." - ) - if len(msg_lines) == 1: - msg_lines.append(last_line_fmt.format("this directory")) - else: - msg_lines.append(last_line_fmt.format("these directories")) - - # Add a note if any directory starts with ~ - warn_for_tilde = any( - i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i - ) - if warn_for_tilde: - tilde_warning_msg = ( - "NOTE: The current PATH contains path(s) starting with `~`, " - "which may not be expanded by all applications." - ) - msg_lines.append(tilde_warning_msg) - - # Returns the formatted multiline message - return "\n".join(msg_lines) - - -def _normalized_outrows( - outrows: Iterable[InstalledCSVRow], -) -> List[Tuple[str, str, str]]: - """Normalize the given rows of a RECORD file. - - Items in each row are converted into str. Rows are then sorted to make - the value more predictable for tests. - - Each row is a 3-tuple (path, hash, size) and corresponds to a record of - a RECORD file (see PEP 376 and PEP 427 for details). For the rows - passed to this function, the size can be an integer as an int or string, - or the empty string. - """ - # Normally, there should only be one row per path, in which case the - # second and third elements don't come into play when sorting. - # However, in cases in the wild where a path might happen to occur twice, - # we don't want the sort operation to trigger an error (but still want - # determinism). Since the third element can be an int or string, we - # coerce each element to a string to avoid a TypeError in this case. 
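# Illustrative aside: a minimal sketch (not pip's code) of the normalization described in
# the comments above. Sizes may arrive as int, str, or the empty string, so every element
# is coerced to str before sorting. The rows are made-up examples.
example_rows = [
    ("pkg/b.py", "sha256=yyy", 10),
    ("pkg/a.py", "sha256=xxx", "7"),
    ("pkg/a.py", "", ""),
]
normalized = sorted((path, digest, str(size)) for path, digest, size in example_rows)
# normalized == [("pkg/a.py", "", ""), ("pkg/a.py", "sha256=xxx", "7"), ("pkg/b.py", "sha256=yyy", "10")]
# End of aside.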
- # For additional background, see-- - # https://github.com/pypa/pip/issues/5868 - return sorted( - (record_path, hash_, str(size)) for record_path, hash_, size in outrows - ) - - -def _record_to_fs_path(record_path: RecordPath, lib_dir: str) -> str: - return os.path.join(lib_dir, record_path) - - -def _fs_to_record_path(path: str, lib_dir: str) -> RecordPath: - # On Windows, do not handle relative paths if they belong to different - # logical disks - if os.path.splitdrive(path)[0].lower() == os.path.splitdrive(lib_dir)[0].lower(): - path = os.path.relpath(path, lib_dir) - - path = path.replace(os.path.sep, "/") - return cast("RecordPath", path) - - -def get_csv_rows_for_installed( - old_csv_rows: List[List[str]], - installed: Dict[RecordPath, RecordPath], - changed: Set[RecordPath], - generated: List[str], - lib_dir: str, -) -> List[InstalledCSVRow]: - """ - :param installed: A map from archive RECORD path to installation RECORD - path. - """ - installed_rows: List[InstalledCSVRow] = [] - for row in old_csv_rows: - if len(row) > 3: - logger.warning("RECORD line has more than three elements: %s", row) - old_record_path = cast("RecordPath", row[0]) - new_record_path = installed.pop(old_record_path, old_record_path) - if new_record_path in changed: - digest, length = rehash(_record_to_fs_path(new_record_path, lib_dir)) - else: - digest = row[1] if len(row) > 1 else "" - length = row[2] if len(row) > 2 else "" - installed_rows.append((new_record_path, digest, length)) - for f in generated: - path = _fs_to_record_path(f, lib_dir) - digest, length = rehash(f) - installed_rows.append((path, digest, length)) - for installed_record_path in installed.values(): - installed_rows.append((installed_record_path, "", "")) - return installed_rows - - -def get_console_script_specs(console: Dict[str, str]) -> List[str]: - """ - Given the mapping from entrypoint name to callable, return the relevant - console script specs. - """ - # Don't mutate caller's version - console = console.copy() - - scripts_to_generate = [] - - # Special case pip and setuptools to generate versioned wrappers - # - # The issue is that some projects (specifically, pip and setuptools) use - # code in setup.py to create "versioned" entry points - pip2.7 on Python - # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into - # the wheel metadata at build time, and so if the wheel is installed with - # a *different* version of Python the entry points will be wrong. The - # correct fix for this is to enhance the metadata to be able to describe - # such versioned entry points, but that won't happen till Metadata 2.0 is - # available. - # In the meantime, projects using versioned entry points will either have - # incorrect versioned entry points, or they will not be able to distribute - # "universal" wheels (i.e., they will need a wheel per Python version). - # - # Because setuptools and pip are bundled with _ensurepip and virtualenv, - # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we - # override the versioned entry points in the wheel and generate the - # correct ones. This code is purely a short-term measure until Metadata 2.0 - # is available. - # - # To add the level of hack in this section of code, in order to support - # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment - # variable which will control which version scripts get installed. 
- # - # ENSUREPIP_OPTIONS=altinstall - # - Only pipX.Y and easy_install-X.Y will be generated and installed - # ENSUREPIP_OPTIONS=install - # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note - # that this option is technically if ENSUREPIP_OPTIONS is set and is - # not altinstall - # DEFAULT - # - The default behavior is to install pip, pipX, pipX.Y, easy_install - # and easy_install-X.Y. - pip_script = console.pop("pip", None) - if pip_script: - if "ENSUREPIP_OPTIONS" not in os.environ: - scripts_to_generate.append("pip = " + pip_script) - - if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall": - scripts_to_generate.append( - "pip{} = {}".format(sys.version_info[0], pip_script) - ) - - scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}") - # Delete any other versioned pip entry points - pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)] - for k in pip_ep: - del console[k] - easy_install_script = console.pop("easy_install", None) - if easy_install_script: - if "ENSUREPIP_OPTIONS" not in os.environ: - scripts_to_generate.append("easy_install = " + easy_install_script) - - scripts_to_generate.append( - "easy_install-{} = {}".format( - get_major_minor_version(), easy_install_script - ) - ) - # Delete any other versioned easy_install entry points - easy_install_ep = [ - k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k) - ] - for k in easy_install_ep: - del console[k] - - # Generate the console entry points specified in the wheel - scripts_to_generate.extend(starmap("{} = {}".format, console.items())) - - return scripts_to_generate - - -class ZipBackedFile: - def __init__( - self, src_record_path: RecordPath, dest_path: str, zip_file: ZipFile - ) -> None: - self.src_record_path = src_record_path - self.dest_path = dest_path - self._zip_file = zip_file - self.changed = False - - def _getinfo(self) -> ZipInfo: - return self._zip_file.getinfo(self.src_record_path) - - def save(self) -> None: - # directory creation is lazy and after file filtering - # to ensure we don't install empty dirs; empty dirs can't be - # uninstalled. - parent_dir = os.path.dirname(self.dest_path) - ensure_dir(parent_dir) - - # When we open the output file below, any existing file is truncated - # before we start writing the new contents. This is fine in most - # cases, but can cause a segfault if pip has loaded a shared - # object (e.g. from pyopenssl through its vendored urllib3) - # Since the shared object is mmap'd an attempt to call a - # symbol in it will then cause a segfault. Unlinking the file - # allows writing of new contents while allowing the process to - # continue to use the old copy. - if os.path.exists(self.dest_path): - os.unlink(self.dest_path) - - zipinfo = self._getinfo() - - with self._zip_file.open(zipinfo) as f: - with open(self.dest_path, "wb") as dest: - shutil.copyfileobj(f, dest) - - if zip_item_is_executable(zipinfo): - set_extracted_file_to_default_mode_plus_executable(self.dest_path) - - -class ScriptFile: - def __init__(self, file: "File") -> None: - self._file = file - self.src_record_path = self._file.src_record_path - self.dest_path = self._file.dest_path - self.changed = False - - def save(self) -> None: - self._file.save() - self.changed = fix_script(self.dest_path) - - -class MissingCallableSuffix(InstallationError): - def __init__(self, entry_point: str) -> None: - super().__init__( - "Invalid script entry point: {} - A callable " - "suffix is required. 
Cf https://packaging.python.org/" - "specifications/entry-points/#use-for-scripts for more " - "information.".format(entry_point) - ) - - -def _raise_for_invalid_entrypoint(specification: str) -> None: - entry = get_export_entry(specification) - if entry is not None and entry.suffix is None: - raise MissingCallableSuffix(str(entry)) - - -class PipScriptMaker(ScriptMaker): - def make( - self, specification: str, options: Optional[Dict[str, Any]] = None - ) -> List[str]: - _raise_for_invalid_entrypoint(specification) - return super().make(specification, options) - - -def _install_wheel( - name: str, - wheel_zip: ZipFile, - wheel_path: str, - scheme: Scheme, - pycompile: bool = True, - warn_script_location: bool = True, - direct_url: Optional[DirectUrl] = None, - requested: bool = False, -) -> None: - """Install a wheel. - - :param name: Name of the project to install - :param wheel_zip: open ZipFile for wheel being installed - :param scheme: Distutils scheme dictating the install directories - :param req_description: String used in place of the requirement, for - logging - :param pycompile: Whether to byte-compile installed Python files - :param warn_script_location: Whether to check that scripts are installed - into a directory on PATH - :raises UnsupportedWheel: - * when the directory holds an unpacked wheel with incompatible - Wheel-Version - * when the .dist-info dir does not match the wheel - """ - info_dir, metadata = parse_wheel(wheel_zip, name) - - if wheel_root_is_purelib(metadata): - lib_dir = scheme.purelib - else: - lib_dir = scheme.platlib - - # Record details of the files moved - # installed = files copied from the wheel to the destination - # changed = files changed while installing (scripts #! line typically) - # generated = files newly generated during the install (script wrappers) - installed: Dict[RecordPath, RecordPath] = {} - changed: Set[RecordPath] = set() - generated: List[str] = [] - - def record_installed( - srcfile: RecordPath, destfile: str, modified: bool = False - ) -> None: - """Map archive RECORD paths to installation RECORD paths.""" - newpath = _fs_to_record_path(destfile, lib_dir) - installed[srcfile] = newpath - if modified: - changed.add(newpath) - - def is_dir_path(path: RecordPath) -> bool: - return path.endswith("/") - - def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None: - if not is_within_directory(dest_dir_path, target_path): - message = ( - "The wheel {!r} has a file {!r} trying to install" - " outside the target directory {!r}" - ) - raise InstallationError( - message.format(wheel_path, target_path, dest_dir_path) - ) - - def root_scheme_file_maker( - zip_file: ZipFile, dest: str - ) -> Callable[[RecordPath], "File"]: - def make_root_scheme_file(record_path: RecordPath) -> "File": - normed_path = os.path.normpath(record_path) - dest_path = os.path.join(dest, normed_path) - assert_no_path_traversal(dest, dest_path) - return ZipBackedFile(record_path, dest_path, zip_file) - - return make_root_scheme_file - - def data_scheme_file_maker( - zip_file: ZipFile, scheme: Scheme - ) -> Callable[[RecordPath], "File"]: - scheme_paths = {key: getattr(scheme, key) for key in SCHEME_KEYS} - - def make_data_scheme_file(record_path: RecordPath) -> "File": - normed_path = os.path.normpath(record_path) - try: - _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2) - except ValueError: - message = ( - "Unexpected file in {}: {!r}. .data directory contents" - " should be named like: '/'." 
- ).format(wheel_path, record_path) - raise InstallationError(message) - - try: - scheme_path = scheme_paths[scheme_key] - except KeyError: - valid_scheme_keys = ", ".join(sorted(scheme_paths)) - message = ( - "Unknown scheme key used in {}: {} (for file {!r}). .data" - " directory contents should be in subdirectories named" - " with a valid scheme key ({})" - ).format(wheel_path, scheme_key, record_path, valid_scheme_keys) - raise InstallationError(message) - - dest_path = os.path.join(scheme_path, dest_subpath) - assert_no_path_traversal(scheme_path, dest_path) - return ZipBackedFile(record_path, dest_path, zip_file) - - return make_data_scheme_file - - def is_data_scheme_path(path: RecordPath) -> bool: - return path.split("/", 1)[0].endswith(".data") - - paths = cast(List[RecordPath], wheel_zip.namelist()) - file_paths = filterfalse(is_dir_path, paths) - root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path, file_paths) - - make_root_scheme_file = root_scheme_file_maker(wheel_zip, lib_dir) - files: Iterator[File] = map(make_root_scheme_file, root_scheme_paths) - - def is_script_scheme_path(path: RecordPath) -> bool: - parts = path.split("/", 2) - return len(parts) > 2 and parts[0].endswith(".data") and parts[1] == "scripts" - - other_scheme_paths, script_scheme_paths = partition( - is_script_scheme_path, data_scheme_paths - ) - - make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme) - other_scheme_files = map(make_data_scheme_file, other_scheme_paths) - files = chain(files, other_scheme_files) - - # Get the defined entry points - distribution = get_wheel_distribution( - FilesystemWheel(wheel_path), - canonicalize_name(name), - ) - console, gui = get_entrypoints(distribution) - - def is_entrypoint_wrapper(file: "File") -> bool: - # EP, EP.exe and EP-script.py are scripts generated for - # entry point EP by setuptools - path = file.dest_path - name = os.path.basename(path) - if name.lower().endswith(".exe"): - matchname = name[:-4] - elif name.lower().endswith("-script.py"): - matchname = name[:-10] - elif name.lower().endswith(".pya"): - matchname = name[:-4] - else: - matchname = name - # Ignore setuptools-generated scripts - return matchname in console or matchname in gui - - script_scheme_files: Iterator[File] = map( - make_data_scheme_file, script_scheme_paths - ) - script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files) - script_scheme_files = map(ScriptFile, script_scheme_files) - files = chain(files, script_scheme_files) - - for file in files: - file.save() - record_installed(file.src_record_path, file.dest_path, file.changed) - - def pyc_source_file_paths() -> Generator[str, None, None]: - # We de-duplicate installation paths, since there can be overlap (e.g. - # file in .data maps to same location as file in wheel root). - # Sorting installation paths makes it easier to reproduce and debug - # issues related to permissions on existing files. 
- for installed_path in sorted(set(installed.values())): - full_installed_path = os.path.join(lib_dir, installed_path) - if not os.path.isfile(full_installed_path): - continue - if not full_installed_path.endswith(".py"): - continue - yield full_installed_path - - def pyc_output_path(path: str) -> str: - """Return the path the pyc file would have been written to.""" - return importlib.util.cache_from_source(path) - - # Compile all of the pyc files for the installed files - if pycompile: - with captured_stdout() as stdout: - with warnings.catch_warnings(): - warnings.filterwarnings("ignore") - for path in pyc_source_file_paths(): - success = compileall.compile_file(path, force=True, quiet=True) - if success: - pyc_path = pyc_output_path(path) - assert os.path.exists(pyc_path) - pyc_record_path = cast( - "RecordPath", pyc_path.replace(os.path.sep, "/") - ) - record_installed(pyc_record_path, pyc_path) - logger.debug(stdout.getvalue()) - - maker = PipScriptMaker(None, scheme.scripts) - - # Ensure old scripts are overwritten. - # See https://github.com/pypa/pip/issues/1800 - maker.clobber = True - - # Ensure we don't generate any variants for scripts because this is almost - # never what somebody wants. - # See https://bitbucket.org/pypa/distlib/issue/35/ - maker.variants = {""} - - # This is required because otherwise distlib creates scripts that are not - # executable. - # See https://bitbucket.org/pypa/distlib/issue/32/ - maker.set_mode = True - - # Generate the console and GUI entry points specified in the wheel - scripts_to_generate = get_console_script_specs(console) - - gui_scripts_to_generate = list(starmap("{} = {}".format, gui.items())) - - generated_console_scripts = maker.make_multiple(scripts_to_generate) - generated.extend(generated_console_scripts) - - generated.extend(maker.make_multiple(gui_scripts_to_generate, {"gui": True})) - - if warn_script_location: - msg = message_about_scripts_not_on_PATH(generated_console_scripts) - if msg is not None: - logger.warning(msg) - - generated_file_mode = 0o666 & ~current_umask() - - @contextlib.contextmanager - def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]: - with adjacent_tmp_file(path, **kwargs) as f: - yield f - os.chmod(f.name, generated_file_mode) - replace(f.name, path) - - dest_info_dir = os.path.join(lib_dir, info_dir) - - # Record pip as the installer - installer_path = os.path.join(dest_info_dir, "INSTALLER") - with _generate_file(installer_path) as installer_file: - installer_file.write(b"pip\n") - generated.append(installer_path) - - # Record the PEP 610 direct URL reference - if direct_url is not None: - direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME) - with _generate_file(direct_url_path) as direct_url_file: - direct_url_file.write(direct_url.to_json().encode("utf-8")) - generated.append(direct_url_path) - - # Record the REQUESTED file - if requested: - requested_path = os.path.join(dest_info_dir, "REQUESTED") - with open(requested_path, "wb"): - pass - generated.append(requested_path) - - record_text = distribution.read_text("RECORD") - record_rows = list(csv.reader(record_text.splitlines())) - - rows = get_csv_rows_for_installed( - record_rows, - installed=installed, - changed=changed, - generated=generated, - lib_dir=lib_dir, - ) - - # Record details of all files installed - record_path = os.path.join(dest_info_dir, "RECORD") - - with _generate_file(record_path, **csv_io_kwargs("w")) as record_file: - # Explicitly cast to typing.IO[str] as a workaround for the mypy 
error: - # "writer" has incompatible type "BinaryIO"; expected "_Writer" - writer = csv.writer(cast("IO[str]", record_file)) - writer.writerows(_normalized_outrows(rows)) - - -@contextlib.contextmanager -def req_error_context(req_description: str) -> Generator[None, None, None]: - try: - yield - except InstallationError as e: - message = "For req: {}. {}".format(req_description, e.args[0]) - raise InstallationError(message) from e - - -def install_wheel( - name: str, - wheel_path: str, - scheme: Scheme, - req_description: str, - pycompile: bool = True, - warn_script_location: bool = True, - direct_url: Optional[DirectUrl] = None, - requested: bool = False, -) -> None: - with ZipFile(wheel_path, allowZip64=True) as z: - with req_error_context(req_description): - _install_wheel( - name=name, - wheel_zip=z, - wheel_path=wheel_path, - scheme=scheme, - pycompile=pycompile, - warn_script_location=warn_script_location, - direct_url=direct_url, - requested=requested, - ) diff --git a/spaces/Benson/text-generation/Examples/Avarampoo Descarga De Pelculas Pelculas.md b/spaces/Benson/text-generation/Examples/Avarampoo Descarga De Pelculas Pelculas.md deleted file mode 100644 index 8f55803053c332773b056ec3801ec5eee03e8b97..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Avarampoo Descarga De Pelculas Pelculas.md +++ /dev/null @@ -1,71 +0,0 @@ -
-

Avarampoo Movie Download Moviesda: Cómo ver en línea gratis

-

¿Eres un fan de las películas tamiles y buscas una manera de ver la película de Avarampoo en línea gratis? Si es así, entonces has venido al lugar correcto. En este artículo, le diremos todo lo que necesita saber sobre la película de Avarampoo, el sitio web de Moviesda y cómo descargar o transmitir la película de Avarampoo en línea de forma gratuita. Así que, sin más preámbulos, empecemos.

-

avarampoo descarga de películas películas


DOWNLOAD ★★★★★ https://bltlly.com/2v6KoS



-

Introducción

-

¿Qué es la película de Avarampoo?

-

Avarampoo (transl. Avaram senna) es una película de 1992 dirigida por Bharathan y producida por Keyaar. Es un remake de la película malayalam de 1980 Thakara, y protagonizada por Vineeth, Nandhini y Nassar en los papeles principales. La película fue lanzada el 5 de junio de 1992.

-

La trama de la película gira en torno a Sakkarai, un huérfano con problemas mentales que trabaja para el jefe de la aldea Thevar. Se enamora de la hija de su amo, Thamarai, pero se enfrenta a la oposición de Thevar y su segunda esposa. La película explora los temas de la inocencia, el amor, la traición y la injusticia social.

-

¿Qué es el sitio web de Moviesda?

-

Moviesda es un sitio web que ofrece una amplia gama de películas tamiles, incluyendo los últimos lanzamientos en alta calidad de definición. Su extenso catálogo y la posibilidad de acceder a las películas de forma gratuita ha dado a la plataforma una ventaja significativa en popularidad. Sin embargo, es importante tener en cuenta que Moviesda está asociado con la infracción de derechos de autor y la piratería.

-

Moviesda sube versiones piratas de películas en hindi, marathi, inglés, telugu, tamil y malayalam en su portal. Los usuarios pueden descargar o transmitir estas películas sin pagar ninguna cuota de suscripción o registro. Moviesda también ofrece varios formatos y calidades de películas como 300mb, 720p, 1080p, etc.

-

-

¿Por qué es popular la película de Avarampoo?

-

La película Avarampoo es popular entre los amantes del cine tamil por varias razones. Algunas de ellas son:

-
    - -
  • La película cuenta con Vineeth, Nandhini y Nassar, que son actores muy conocidos en el cine tamil. Vineeth is known for his roles in films such as Pudhiya Mugam, May Madham, Kaadhal Desam, etc. Nandhini is known for her roles in films such as Chinna Thambi Periya Thambi, Kizhakku Vasal, etc. Nassar is known for his roles in films such as Nayakan, Roja, Bombay, etc.
  • -
  • La película tiene una historia conmovedora y realista que representa la difícil situación de un niño con discapacidad mental que se enamora de una chica de una casta superior y se enfrenta a la ira de su familia y la sociedad.
  • -
  • La película tiene una música melodiosa y conmovedora compuesta por Ilaiyaraaja, considerado uno de los mejores compositores del cine indio. Ha ganado cinco National Film Awards y seis Tamil Nadu State Film Awards por su música.
  • -
  • La película ha recibido críticas positivas de críticos y audiencias por igual. Ha sido elogiado por su dirección, actuaciones, música, cinematografía y guion. También ha sido seleccionado para varios festivales de cine y premios.
  • -
-

¿Cómo descargar la película de Avarampoo de Moviesda?

-

Si desea descargar la película de Avarampoo desde Moviesda, debe seguir estos pasos:

-

Paso 1: Instalar una VPN y conectarse a un servidor donde Moviesda es accesible

-

Una VPN o una red privada virtual es un software que le permite acceder a sitios web bloqueados o restringidos cambiando su dirección IP y cifrando sus datos. Dado que Moviesda es un sitio web ilegal que puede estar prohibido o bloqueado en algunos países o regiones, debe usar una VPN para acceder a él de forma segura y anónima.

- -

Paso 2: Visita el sitio web de Moviesda y busca películas de Avarampoo

-

Después de conectarse a un servidor VPN, debe visitar el sitio web de Moviesda usando su navegador. El sitio web puede tener diferentes nombres de dominio o extensiones debido a cambios frecuentes. Algunos de los posibles nombres de dominio son moviesda.com, moviesda.net, moviesda.in, etc. Puede utilizar un motor de búsqueda como Google o Bing para encontrar el último nombre de dominio de Moviesda.

-

Una vez que haya llegado al sitio web de Moviesda, debe buscar una película de Avarampoo usando la barra de búsqueda o las categorías. También puede navegar a través de las últimas subidas o la lista alfabética de películas. Puede encontrar múltiples resultados para la película de Avarampoo con diferentes fechas de lanzamiento, idiomas o versiones. Debe seleccionar el que coincida con su preferencia.

-

Paso 3: Elija la calidad y el formato de la película y haga clic en el enlace de descarga

-

Después de seleccionar la película de Avarampoo que desea descargar, debe elegir la calidad y el formato de la película que se adapte a su dispositivo y la velocidad de Internet. Moviesda ofrece varias calidades y formatos de películas como 300mb, 720p, 1080p, mp4, mkv, etc. También puede comprobar el tamaño del archivo y la duración de la película antes de descargarlo.

-

Una vez que haya elegido la calidad y el formato de la película, debe hacer clic en el enlace de descarga que lo redirigirá a otra página o sitio. Usted puede encontrar algunos anuncios emergentes o verificación de captcha en esta página o sitio. Necesitas cerrar los anuncios o completar la verificación para proceder con la descarga.

-

Paso 4: Espere a que la descarga se complete y disfrute de la película sin conexión

-

Después de hacer clic en el enlace de descarga, debe esperar a que se complete la descarga. La velocidad de descarga puede variar dependiendo de su conexión a Internet y la carga del servidor. Puede comprobar el progreso de la descarga en su navegador o gestor de descargas.

- -

¿Cómo ver la película de Avarampoo en línea gratis?

-

Si no desea descargar la película de Avarampoo desde Moviesda o cualquier otro sitio web ilegal, puede verla en línea de forma gratuita utilizando algunas alternativas legales. Estas son algunas de las opciones que puedes probar:

-

Opción 1: Utilice una plataforma de streaming que ofrece películas de Avarampoo

-

Una plataforma de streaming es un sitio web o una aplicación que te permite ver películas y programas en línea sin descargarlos. Algunas de las plataformas de streaming son gratuitas, mientras que otras requieren una cuota de suscripción o registro. Algunas de las plataformas de streaming que ofrecen películas de Avarampoo son:

-

Tamilyogi

-

Tamilyogi es una plataforma de streaming gratuita que ofrece una gran colección de películas y programas tamiles. Puedes ver la película de Avarampoo en línea gratis en Tamilyogi sin ningún registro o suscripción. Sin embargo, es posible que tenga que lidiar con algunos anuncios molestos y ventanas emergentes mientras transmite la película. También puede elegir la calidad y el formato de la película según su preferencia.

-

YouTube

-

YouTube es una de las plataformas de streaming más populares y ampliamente utilizadas en el mundo. Ofrece una variedad de contenido incluyendo películas, programas, música, videos, etc. Puede ver la película de Avarampoo en línea de forma gratuita en YouTube si está disponible en la plataforma. También puede utilizar la barra de búsqueda o los filtros para encontrar la película fácilmente. También puede ajustar la calidad y la velocidad de la película según su conveniencia.

-

Opción 2: Utilice un sitio de torrent que tiene película de Avarampoo

-

Un sitio de torrent es un sitio web que le permite descargar o compartir archivos utilizando una red de igual a igual. Un archivo torrent es un archivo pequeño que contiene información sobre el archivo más grande que desea descargar. Necesita un cliente de torrent o software para descargar o cargar archivos usando un sitio de torrent. Algunos de los sitios de torrent que tienen una película de Avarampoo son:

-

Tamilrockers

- -

Isaimini

-

Isaimini es otro popular sitio de torrents que ofrece una gran colección de películas y programas tamiles. Puede descargar la película de Avarampoo desde Isaimini utilizando un cliente de torrent como BitTorrent o uTorrent. Sin embargo, debe tener cuidado de que Isaimini también es un sitio web ilegal que puede estar sujeto a acciones legales o sanciones.

-

Conclusión

-

Resumen de los puntos principales

-

En este artículo, hemos discutido la película de Avarampoo, el sitio web de Moviesda, y cómo descargar o ver la película de Avarampoo en línea gratis. También hemos proporcionado algunas alternativas legales a Moviesda que puede utilizar para ver la película de Avarampoo en línea de forma gratuita. Esperamos que haya encontrado este artículo útil e informativo.

-

Descargo de responsabilidad y advertencia sobre piratería y cuestiones legales

-

Nos gustaría recordarle que descargar o transmitir películas piratas de sitios web ilegales como Moviesda es un delito y una violación de los derechos de propiedad intelectual. La piratería es un delito grave que puede dar lugar a acciones legales o sanciones. No respaldamos ni promovemos tales sitios web o actividades. Le aconsejamos respetar la ley y los derechos de los creadores y propietarios de las películas.

-

Llamada a la acción y petición de comentarios

-

Si te gustó este artículo, por favor compártelo con tus amigos y familiares que están interesados en ver la película de Avarampoo en línea gratis. También, por favor deje sus comentarios y comentarios a continuación. Nos encantaría saber de usted y mejorar nuestra calidad de contenido.

-

Preguntas frecuentes

-
    -
  • Q: ¿Cuál es la calificación IMDb de la película de Avarampoo?
  • -
  • A: La calificación de IMDb de la película de Avarampoo es 7.1 de 10 basado en 35 comentarios de usuarios.
  • -
  • Q: ¿Quiénes son los cantantes de las canciones en la película de Avarampoo?
  • -
  • A: Los cantantes de las canciones en la película de Avarampoo son S.P.Balasubrahmanyam, K.S.Chithra, Mano, Swarnalatha, etc.
  • -
  • Q: ¿Dónde puedo ver una película de Avarampoo con subtítulos?
  • - -
  • Q: ¿Cuánto dura una película de Avarampoo?
  • -
  • A: La película de Avarampoo dura 2 horas y 17 minutos.
  • -
  • Q: ¿Cuáles son algunas otras películas similares a la película de Avarampoo?
  • -
  • A: Algunas otras películas similares a la película de Avarampoo son Thakara (1980), Kadhal Kottai (1996), Kaadhal (2004), etc.
  • -

64aa2da5cf
-
-
\ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Betty Noir Fuente Descargar.md b/spaces/Benson/text-generation/Examples/Betty Noir Fuente Descargar.md deleted file mode 100644 index c8f57807125b293caadc120ea6eaa17011c08d1b..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Betty Noir Fuente Descargar.md +++ /dev/null @@ -1,81 +0,0 @@ - - - - -

Betty Noir Font Download: Una tipografía retro y elegante para sus proyectos

-

Si usted está buscando una fuente que puede añadir un toque vintage y personalidad a sus proyectos, es posible que desee echa un vistazo a Betty Noir Font. Esta fuente está inspirada en las clásicas películas noir de Hollywood de las décadas de 1940 y 1950, y tiene un distintivo aspecto retro y elegante. En este artículo, le diremos todo lo que necesita saber sobre Betty Noir Font, incluyendo su historia, características, uso y fuentes de descarga. También te mostraremos algunos ejemplos de cómo se puede usar Betty Noir Font en diseño web y arte gráfico. ¡Empecemos!

-

betty noir fuente descargar


DOWNLOADhttps://bltlly.com/2v6Ktr



-

¿Qué es Betty Noir Font?

-

Betty Noir Font es una fuente gratuita creada por Nate Piekos de Blambot Fonts. Fue lanzado por primera vez en 2004, y ha sido descargado más de 400.000 veces desde entonces. La fuente lleva el nombre de Betty Grable, una de las actrices más populares de la era negra. La fuente está diseñada para imitar el estilo de los títulos de películas y carteles de ese período de tiempo, con una mezcla de elementos art deco y script.

-

La historia y características de Betty Noir Font

-

El género negro surgió en la década de 1940 como una subcategoría de la ficción criminal y el cine. Fue influenciado por el expresionismo alemán, las novelas policíacas y la desilusión de la posguerra. Las historias noir típicamente presentaban protagonistas cínicos, mujeres fatales, autoridades corruptas y atmósferas oscuras. Algunas de las películas noir más famosas incluyen The Maltese Falcon, Double Indemnity, The Big Sleep, Sunset Boulevard y The Third Man.

- -

Cómo usar Betty Noir Font en tus diseños

-

Betty Noir Font es una fuente versátil que se puede usar para varios propósitos. Aquí hay algunos consejos sobre cómo usarla eficazmente:

-
    -
  • Utilizarlo para titulares, títulos, logotipos, banners, carteles, folletos, invitaciones, etiquetas, etc. Puede crear un fuerte impacto visual y atraer la atención.
  • -
  • Úselo con moderación para el texto del cuerpo o los párrafos largos. Puede ser difícil de leer en tamaños más pequeños o en fondos de bajo contraste.
  • -
  • Úsalo con fuentes complementarias que coincidan con su estilo y estado de ánimo. Por ejemplo, puede combinarlo con fuentes sans-serif como Arial o Verdana para un aspecto moderno o con fuentes serif como Times New Roman o Georgia para un aspecto clásico.
  • -
  • Úsalo con colores apropiados que realcen su ambiente retro. Por ejemplo, puede usar blanco y negro para un efecto negro o colores brillantes como rojo o amarillo para un efecto de arte pop.
  • -
  • Úsalo con imágenes que se adapten a su tema y género. Por ejemplo, puedes usar fotos o ilustraciones de coches antiguos, edificios y otros elementos de época.

    ¿Dónde descargar Betty Noir Font?

    -

    Betty Noir Font es una fuente gratuita que puede descargar de varios sitios web. Sin embargo, no todas las fuentes son confiables o legales. Algunos pueden contener virus o malware que pueden dañar su computadora o spyware que puede robar su información personal. Por lo tanto, siempre debe descargar fuentes de fuentes legales y de buena reputación. Estos son algunos de los mejores sitios web donde se puede descargar Betty Noir Font de forma gratuita y legal:

    -
      -
    • [DaFont]( 1 ): Este es uno de los sitios web más populares para las fuentes gratuitas, con más de 40.000 fuentes para elegir. Puedes encontrar Betty Noir Font en la categoría Fantasía > Retro, o buscarlo por nombre. La fuente es gratuita para uso personal, pero debe ponerse en contacto con el diseñador para uso comercial.
    • - -
    • [1001 Fonts]( 3 ): Este es un sitio web que ofrece fuentes gratuitas y premium, con más de 30.000 fuentes para elegir. Puede encontrar Betty Noir Font en la categoría Retro, o buscarlo por nombre. La fuente es gratuita para uso personal, pero no gratuita para uso comercial.
    • -
    -

    Antes de descargar cualquier fuente, asegúrese de leer los términos de la licencia cuidadosamente y seguirlos en consecuencia. Algunas fuentes pueden requerir atribución, donación o permiso del diseñador para ciertos usos.

    -

    Cómo instalar y aplicar Betty Noir Font en HTML

    -

    Una vez que haya descargado Betty Noir Font desde una fuente confiable, debe instalarlo en su computadora y aplicarlo en su código HTML. Estos son los pasos para hacerlo:

    -

    -
      -
    1. Descomprima el archivo de fuente y guárdelo en una carpeta en su computadora.
    2. -
    3. Abra el Panel de control y vaya a Fuentes. Arrastre y suelte el archivo de fuente en la carpeta Fuentes. Alternativamente, puede hacer clic derecho en el archivo de fuente y seleccionar Instalar.
    4. -
    5. Abra su editor HTML y cree un nuevo archivo o abra uno existente.
    6. -
    7. Agregue el siguiente código a la sección principal de su archivo HTML:
      @font-face {
        font-family: 'Betty Noir';
        src: url('bettynoir.ttf');
      }

      Este código define una fuente llamada Betty Noir y la vincula al archivo de fuente que guardó en su computadora. Asegúrese de usar la ruta y el nombre correctos del archivo de fuente. Más adelante, después de esta lista, se muestra un ejemplo completo.
    8. -
    9. Agregue el siguiente código a la sección de estilo de su archivo HTML o en un archivo CSS separado:
      h1 {
        font-family: 'Betty Noir', sans-serif;
      }

      Este código aplica la fuente Betty Noir a todos los elementos h1 en su archivo HTML. También puede usar otros selectores o propiedades para personalizar su estilo de fuente.
    10. -
    11. Guarda tu archivo HTML y ábrelo en un navegador para ver el resultado.
    12. -
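    A modo de referencia, así podría verse un documento HTML mínimo que combina los dos fragmentos anteriores. Es solo un boceto: el nombre de archivo 'bettynoir.ttf', la ruta y el texto del título son supuestos de ejemplo y debe sustituirlos por los suyos.
      <!DOCTYPE html>
      <html>
      <head>
        <style>
          /* Se asume que 'bettynoir.ttf' está en la misma carpeta que este archivo HTML */
          @font-face {
            font-family: 'Betty Noir';
            src: url('bettynoir.ttf');
          }
          /* Aplica la fuente a los titulares h1 */
          h1 {
            font-family: 'Betty Noir', sans-serif;
          }
        </style>
      </head>
      <body>
        <h1>Mi cartel retro</h1>
      </body>
      </html>
      Si el navegador no encuentra el archivo de fuente, mostrará la fuente de respaldo indicada (sans-serif).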
    - -

    Ejemplos de Betty Noir Font en acción

    -

    Para darle alguna inspiración e ideas sobre cómo usar Betty Noir Font en sus proyectos, aquí hay algunos ejemplos de sitios web y obras de arte gráficas que utilizan esta fuente:

    -

    Sitios web que utilizan Betty Noir Font

    -
      -
    • [The Black Dahlia Murder]( 8 ): Este es un sitio web dedicado al caso de asesinato sin resolver de Elizabeth Short, también conocida como Black Dahlia, que fue asesinada en 1947 en Los Ángeles. El sitio web utiliza Betty Noir Font para su logotipo y titulares, creando una atmósfera noir.
    • -
    • [The Vintage News]( 9 ): Este es un sitio web que cubre historias y hechos de historia, cultura, ciencia, arte y más. El sitio web utiliza Betty Noir Font para su logotipo y algunos de sus titulares, dándole un aspecto retro y elegante.
    • -
    • [El arte de la hombría]( 10 ): Este es un sitio web que ofrece artículos, podcasts, videos y libros sobre temas relacionados con el estilo de vida de los hombres, como el aseo, la aptitud, el estilo, las relaciones, las habilidades y más. El sitio web utiliza Betty Noir Font para su logotipo y algunos de sus titulares, añadiendo algo de encanto vintage y personalidad.
    • -
    -

    Obras de arte gráficas que utilizan Betty Noir Font

    -
      -
    • [Noir Movie Poster]( 11 ): Esta es una obra gráfica creada por [Nate Piekos]( 12 ), el diseñador de Betty Noir Font. Es un póster de una película de cine negro llamado The Big Sleepover. Utiliza Betty Noir Font para el título y los créditos, junto con otras fuentes e imágenes para crear un póster realista y cautivador.
    • -
    • [Noir Book Cover]: Esta es una obra gráfica creada por [Jenny Zemanek], una diseñadora de portadas de libros. Es la portada de una novela negra llamada The Big Nothing de Paul D. Brazill. Utiliza Betty Noir Font para el título y el nombre del autor, junto con otras fuentes e imágenes para crear una cubierta misteriosa e intrigante.
    • - -
    -

    Conclusión

    -

    Betty Noir Font es una fuente gratuita que puede añadir un toque retro y elegante a sus proyectos. Está inspirado en las clásicas películas de cine negro de Hollywood de las décadas de 1940 y 1950, y tiene un distintivo estilo art déco y guion. Puede descargar Betty Noir Font desde varios sitios web, pero asegúrese de usar fuentes confiables y legales. También puede instalar y aplicar Betty Noir Font en su código HTML con algunos pasos simples. Betty Noir Font se puede usar para varios propósitos, como titulares, logotipos, carteles, pancartas, etc. También puede combinarlo con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes. Esperamos que este artículo te haya dado información útil e inspiración sobre cómo usar Betty Noir Font en tus proyectos.

    -

    Resumen de los puntos principales

    -
      -
    • Betty Noir Font es una fuente gratuita creada por Nate Piekos de Blambot Fonts en 2004.
    • -
    • Betty Noir Font está inspirada en las clásicas películas noir de Hollywood de los años 40 y 50, y tiene un aspecto retro y elegante.
    • -
    • Betty Noir Font se puede descargar desde varios sitios web, pero siempre debe utilizar fuentes legales y de buena reputación.
    • -
    • Betty Noir Font se puede instalar y aplicar en su código HTML con algunos pasos simples.
    • -
    • Betty Noir Font se puede utilizar para diversos fines, como titulares, logotipos, carteles, banners, etc.
    • -
    • Betty Noir Font se puede combinar con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes.
    • -
    -

    Llamada a la acción y retroalimentación

    - -

    Preguntas frecuentes

    -

    Aquí están algunas de las preguntas más frecuentes sobre Betty Noir Font:

    -
      -
    • Q: ¿Es Betty Noir fuente libre?
      A: Sí, Betty Noir Font es gratis para uso personal, pero es posible que tenga que ponerse en contacto con el diseñador para uso comercial.
    • -
    • P: ¿Qué tipo de fuente es Betty Noir Font?
      A: Betty Noir Font es una fuente retro inspirada en las clásicas películas de cine negro de Hollywood de los años 40 y 50.
    • -
    • P: ¿Cómo descargo Betty Noir Font?
      A: Puede descargar Betty Noir Font desde varios sitios web, pero asegúrese de usar fuentes confiables y legales.
    • -
    • Q: ¿Cómo puedo instalar y aplicar Betty Noir Font en HTML?
      A: Necesitas descomprimir el archivo de fuente, guardarlo en una carpeta en tu computadora, instalarlo en tu carpeta de fuentes, definirlo en tu código HTML usando @font-face, y aplicarlo a tus elementos usando font-family.
    • -
    • P: ¿Cómo uso Betty Noir Font en mis diseños?
      A: Puede usar Betty Noir Font para varios propósitos, como titulares, logotipos, carteles, banners, etc. También puede combinarlo con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes.
    • -

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cubic 234 Juegos De Jugadores Mod Apk.md b/spaces/Benson/text-generation/Examples/Cubic 234 Juegos De Jugadores Mod Apk.md deleted file mode 100644 index 1cabe063bde4de5e598252924817d82922d74e99..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cubic 234 Juegos De Jugadores Mod Apk.md +++ /dev/null @@ -1,86 +0,0 @@ -
    -

    Cubic 234 Juegos de jugadores Mod APK: Disfruta de la diversión y adictivo Mini juegos con amigos

    -

    Introducción

    -

    ¿Te encanta jugar mini juegos con tus amigos y familiares? ¿Quieres divertirte con juegos sencillos y emocionantes que puedas disfrutar en tu dispositivo móvil? Si usted respondió que sí, entonces usted debe definitivamente echa un vistazo Cubic 234 Player Games Mod APK, una colección de increíbles mini juegos que se puede jugar con hasta 6 jugadores en un solo partido.

    -

    cubic 234 juegos de jugadores mod apk


    Download Zip ––– https://bltlly.com/2v6JfX



    -

    ¿Qué son los juegos de jugador Cubic 234?

    -

    Cubic 234 Player Games es un juego para móviles desarrollado por CubeCube Sports, un estudio especializado en crear juegos divertidos y adictivos para todas las edades. El juego cuenta con varios mini juegos que puedes jugar con 2, 3 o 4 jugadores en el mismo dispositivo. También puedes unirte al modo torneo y competir con hasta 6 jugadores online. Los mini juegos son simples pero desafiantes, y pondrán a prueba tus habilidades, reflejos y coordinación. Algunos de los mini juegos incluyen:

    -
      -
    • Snake Arena: Controla tu serpiente y tratar de comer tantas manzanas como sea posible evitando las paredes y otras serpientes.
    • -
    • ludo: un clásico juego de mesa donde tienes que mover tus cuatro piezas al centro del tablero antes que tus oponentes.
    • -
    • Fútbol: Un juego de fútbol donde tienes que anotar más goles que tu oponente pateando la pelota en la red.
    • -
    • batalla de tanques: un juego de tanques donde tienes que disparar el tanque de tu enemigo mientras esquiva sus balas.
    • -
    • Sumo Wrestling: Un juego de lucha libre donde tienes que empujar a tu oponente fuera del ring usando tu peso corporal.
    • -
    • ¡Y muchos más!
    • -
    -

    ¿Por qué descargar Cubic 234 Player Games Mod APK?

    -

    Si se está preguntando por qué debe descargar Cubic 234 Player Games Mod APK en lugar de la versión original de la Google Play Store, aquí hay algunas razones:

    -
      -
    • Obtendrá dinero ilimitado que puede utilizar para comprar nuevas pieles, sombreros y accesorios para sus personajes.
    • - -
    • Te librarás de los molestos anuncios que pueden interrumpir tu juego o agotar tu batería.
    • -
    • Obtendrá un mejor rendimiento y estabilidad en su dispositivo ya que la versión modificada está optimizada para dispositivos de gama baja.
    • -
    -

    Características de Cubic 234 Player Games Mod APK

    -

    Múltiples modos de juego y mini juegos para elegir

    -

    Una de las mejores características de Cubic 234 Player Games Mod APK es que ofrece una variedad de modos de juego y mini juegos que puedes jugar con tus amigos. Puedes elegir entre diferentes categorías como acción, árcade, tablero, deportes, rompecabezas y más. También puedes personalizar el nivel de dificultad, el número de rondas y el límite de tiempo para cada juego. Estos son algunos de los modos de juego que puedes disfrutar:

    -

    -

    modo de jugador 2

    -

    Este modo te permite jugar con un amigo en el mismo dispositivo. Puedes elegir entre más de 20 mini juegos diseñados para dos jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:

    -
      -
    • ping pong: un juego de ping pong donde tienes que golpear la pelota con la paleta y hacer que rebote en el lado de tu oponente.
    • -
    • Tijeras de papel de roca: un juego clásico donde tienes que elegir entre piedra, papel o tijeras y ver quién gana.
    • -
    • examen de matemáticas: un juego de matemáticas donde tienes que resolver ecuaciones simples más rápido que tu oponente.
    • -
    • ¡Y muchos más!
    • -
    -

    3 Modo reproductor

    -

    Este modo te permite jugar con dos amigos en el mismo dispositivo. Puedes elegir entre más de 10 mini juegos diseñados para tres jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:

    -
      -
    • Tron: un juego tron donde tienes que conducir tu ciclo de luz y evitar chocar contra las paredes o los senderos de otros jugadores.
    • - -
    • carrera: un juego de carreras donde tienes que tocar el botón lo más rápido posible para hacer que su coche se mueva más rápido y llegar a la línea de meta primero.
    • -
    • ¡Y muchos más!
    • -
    -

    4 Modo reproductor

    -

    Este modo te permite jugar con tres amigos en el mismo dispositivo. Puedes elegir entre más de 10 mini juegos diseñados para cuatro jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:

    -
      -
    • escuadrón de bombas: un juego de bombas donde tienes que desactivar la bomba antes de que explote cortando el cable derecho.
    • -
    • Poker: Un juego de póquer donde tienes que apostar, farolear y ganar con la mejor mano de cartas.
    • -
    • Memoria: Un juego de memoria donde tienes que hacer coincidir pares de cartas al voltearlas y recordar sus ubicaciones.
    • -
    • ¡Y muchos más!
    • -
    -

    Modo de torneo

    -

    Este modo le permite jugar con hasta 6 jugadores en línea. Puede unirse o crear una habitación e invitar a sus amigos o jugadores al azar a unirse. También puedes chatear con otros jugadores y enviar emojis. Puedes elegir entre más de 30 minijuegos seleccionados al azar para cada ronda. El jugador con más puntos al final del torneo gana.

    -

    Controles simples e intuitivos para un juego fácil

    -

    Otra gran característica de Cubic 234 Player Games Mod APK es que tiene controles simples e intuitivos que hacen que sea fácil para cualquiera jugar. No necesitas gestos complicados ni golpes para controlar a tu personaje. Solo tienes que pulsar uno o dos botones dependiendo del juego. Los botones son grandes y sensibles, y se colocan en las esquinas de la pantalla para que no interfieran con el juego. También puede ajustar los ajustes de sensibilidad y vibración según su preferencia.

    -

    Gráficos coloridos y caricaturescos para un estado de ánimo alegre

    - -

    Dinero ilimitado y funciones desbloqueadas para más diversión

    -

    La mejor característica de Cubic 234 Player Games Mod APK es que le da dinero ilimitado y desbloqueado características que hacen que el juego más divertido y agradable. Con dinero ilimitado, puedes comprar nuevas pieles, sombreros y accesorios para tus personajes. También puedes desbloquear todos los mini juegos y modos de juego sin tener que gastar dinero real o ver anuncios. También puedes eliminar todos los anuncios que puedan molestarte o ralentizar tu dispositivo. Con estas características, puede tener más opciones y personalización para su juego.

    -

    Cómo descargar e instalar Cubic 234 Player Games Mod APK

    -

    Si desea descargar e instalar Cubic 234 Player Games Mod APK en su dispositivo, aquí están los pasos que debe seguir:

    -

    Paso 1: Descargar el archivo APK de una fuente de confianza

    -

    El primer paso es descargar el archivo APK de una fuente de confianza. Puede utilizar el siguiente enlace para descargar la última versión de Cubic 234 Player Games Mod APK gratis. Asegúrate de tener suficiente espacio de almacenamiento en tu dispositivo antes de descargar el archivo.

    -

    Descargar Cubic 234 Juegos de Jugadores Mod APK

    -

    Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo

    -

    El segundo paso es habilitar fuentes desconocidas en la configuración de su dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play Store. Para hacer esto, vaya a la configuración del dispositivo y busque la opción de seguridad o privacidad. Luego, busque la opción de fuentes desconocidas y conéctela. Puede ver un mensaje de advertencia que le indica los riesgos de instalar aplicaciones desconocidas, pero puede ignorarlo y continuar.

    -

    Paso 3: Instalar el archivo APK y lanzar el juego

    - -

    Conclusión

    -

    Cubic 234 Player Games Mod APK es un juego divertido y adictivo que puedes jugar con tus amigos en tu dispositivo móvil. Ofrece una variedad de mini juegos que puedes jugar con 2, 3 o 4 jugadores en el mismo dispositivo, o con hasta 6 jugadores en línea. El juego tiene controles simples e intuitivos, gráficos coloridos y de dibujos animados, dinero ilimitado y funciones desbloqueadas. Puede descargar e instalar Cubic 234 Player Games Mod APK gratis siguiendo los pasos anteriores. Entonces, ¿qué estás esperando? ¡Descarga Cubic 234 Player Games Mod APK ahora y diviértete con tus amigos!

    -

    Preguntas frecuentes

    -

    Aquí están algunas de las preguntas más frecuentes sobre Cubic 234 Player Games Mod APK:

    -
      -
    • Q: ¿Es Cubic 234 Player Games Mod APK seguro de usar?
    • -
    • A: Sí, Cubic 234 Player Games Mod APK es seguro de usar, siempre y cuando se descarga desde una fuente de confianza. La versión modificada no contiene ningún virus o malware que pueda dañar su dispositivo o datos.
    • -
    • Q: ¿Necesito una conexión a Internet para jugar Cubic 234 Player Games Mod APK?
    • -
    • A: No, no necesitas una conexión a Internet para jugar Cubic 234 Player Games Mod APK. Puedes jugar sin conexión con tus amigos en el mismo dispositivo. Sin embargo, si quieres jugar online con otros jugadores, necesitarás una conexión a Internet.
    • -
    • Q: ¿Cuántos mini juegos están disponibles en Cubic 234 Player Games Mod APK?
    • -
    • A: Hay más de 60 mini juegos disponibles en Cubic 234 Player Games Mod APK, dividido en diferentes categorías como acción, árcade, tablero, deportes, rompecabezas y más. Puedes jugar a todos ellos sin tener que desbloquearlos o pagar por ellos.
    • -
    • Q: ¿Puedo personalizar mi personaje en Cubic 234 Player Games Mod APK?
    • -
    • A: Sí, puede personalizar su personaje en Cubic 234 Player Games Mod APK comprando nuevas pieles, sombreros y accesorios con dinero ilimitado. También puedes cambiar el color de tu personaje según tu preferencia.
    • - -
    • A: Sí, puede chatear con otros jugadores en Cubic 234 Player Games Mod APK mediante el uso de la función de chat en el juego. También puede enviar emojis y pegatinas para expresar sus emociones.
    • -

    64aa2da5cf
    -
    -
    \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/ast.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/ast.py deleted file mode 100644 index dd56c6ed6bf2a172fcb684a51fc0137881222774..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/jmespath/ast.py +++ /dev/null @@ -1,90 +0,0 @@ -# AST nodes have this structure: -# {"type": ", children: [], "value": ""} - - -def comparator(name, first, second): - return {'type': 'comparator', 'children': [first, second], 'value': name} - - -def current_node(): - return {'type': 'current', 'children': []} - - -def expref(expression): - return {'type': 'expref', 'children': [expression]} - - -def function_expression(name, args): - return {'type': 'function_expression', 'children': args, 'value': name} - - -def field(name): - return {"type": "field", "children": [], "value": name} - - -def filter_projection(left, right, comparator): - return {'type': 'filter_projection', 'children': [left, right, comparator]} - - -def flatten(node): - return {'type': 'flatten', 'children': [node]} - - -def identity(): - return {"type": "identity", 'children': []} - - -def index(index): - return {"type": "index", "value": index, "children": []} - - -def index_expression(children): - return {"type": "index_expression", 'children': children} - - -def key_val_pair(key_name, node): - return {"type": "key_val_pair", 'children': [node], "value": key_name} - - -def literal(literal_value): - return {'type': 'literal', 'value': literal_value, 'children': []} - - -def multi_select_dict(nodes): - return {"type": "multi_select_dict", "children": nodes} - - -def multi_select_list(nodes): - return {"type": "multi_select_list", "children": nodes} - - -def or_expression(left, right): - return {"type": "or_expression", "children": [left, right]} - - -def and_expression(left, right): - return {"type": "and_expression", "children": [left, right]} - - -def not_expression(expr): - return {"type": "not_expression", "children": [expr]} - - -def pipe(left, right): - return {'type': 'pipe', 'children': [left, right]} - - -def projection(left, right): - return {'type': 'projection', 'children': [left, right]} - - -def subexpression(children): - return {"type": "subexpression", 'children': children} - - -def slice(start, end, step): - return {"type": "slice", "children": [start, end, step]} - - -def value_projection(left, right): - return {'type': 'value_projection', 'children': [left, right]} diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/file_proxy.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/file_proxy.py deleted file mode 100644 index 4b0b0da6c2a62b2b1468c35ddd69f1bbb9b91aa8..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/file_proxy.py +++ /dev/null @@ -1,57 +0,0 @@ -import io -from typing import IO, TYPE_CHECKING, Any, List - -from .ansi import AnsiDecoder -from .text import Text - -if TYPE_CHECKING: - from .console import Console - - -class FileProxy(io.TextIOBase): - """Wraps a file (e.g. 
sys.stdout) and redirects writes to a console.""" - - def __init__(self, console: "Console", file: IO[str]) -> None: - self.__console = console - self.__file = file - self.__buffer: List[str] = [] - self.__ansi_decoder = AnsiDecoder() - - @property - def rich_proxied_file(self) -> IO[str]: - """Get proxied file.""" - return self.__file - - def __getattr__(self, name: str) -> Any: - return getattr(self.__file, name) - - def write(self, text: str) -> int: - if not isinstance(text, str): - raise TypeError(f"write() argument must be str, not {type(text).__name__}") - buffer = self.__buffer - lines: List[str] = [] - while text: - line, new_line, text = text.partition("\n") - if new_line: - lines.append("".join(buffer) + line) - buffer.clear() - else: - buffer.append(line) - break - if lines: - console = self.__console - with console: - output = Text("\n").join( - self.__ansi_decoder.decode_line(line) for line in lines - ) - console.print(output) - return len(text) - - def flush(self) -> None: - output = "".join(self.__buffer) - if output: - self.__console.print(output) - del self.__buffer[:] - - def fileno(self) -> int: - return self.__file.fileno() diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_itertools.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_itertools.py deleted file mode 100644 index cce05582ffc6fe6d72027194f4ccc44ee42f1fcd..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/_itertools.py +++ /dev/null @@ -1,35 +0,0 @@ -from itertools import filterfalse - -from typing import ( - Callable, - Iterable, - Iterator, - Optional, - Set, - TypeVar, - Union, -) - -# Type and type variable definitions -_T = TypeVar('_T') -_U = TypeVar('_U') - - -def unique_everseen( - iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None -) -> Iterator[_T]: - "List unique elements, preserving order. Remember all elements ever seen." 
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D - # unique_everseen('ABBCcAD', str.lower) --> A B C D - seen: Set[Union[_T, _U]] = set() - seen_add = seen.add - if key is None: - for element in filterfalse(seen.__contains__, iterable): - seen_add(element) - yield element - else: - for element in iterable: - k = key(element) - if k not in seen: - seen_add(k) - yield element diff --git a/spaces/Binguii/Venus_Proxy/Dockerfile b/spaces/Binguii/Venus_Proxy/Dockerfile deleted file mode 100644 index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000 --- a/spaces/Binguii/Venus_Proxy/Dockerfile +++ /dev/null @@ -1,21 +0,0 @@ -FROM node:18-bullseye-slim - -RUN apt-get update && \ - -apt-get install -y git - -RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app - -WORKDIR /app - -RUN npm install - -COPY Dockerfile greeting.md* .env* ./ - -RUN npm run build - -EXPOSE 7860 - -ENV NODE_ENV=production - -CMD [ "npm", "start" ] \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/README.md b/spaces/CVPR/Dual-Key_Backdoor_Attacks/README.md deleted file mode 100644 index 4b06346f6c05a9718d0f4a16b6971c710e4db11b..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Dual-Key Backdoor Attacks -emoji: 🔑 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.0.17 -app_file: app.py -pinned: false -license: gpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/build_loader.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/build_loader.py deleted file mode 100644 index 7878c335481881e8d2785da796faaef99966794e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/grid-feats-vqa/grid_feats/build_loader.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import logging -import operator -import torch.utils.data - -from detectron2.utils.comm import get_world_size -from detectron2.data import samplers -from detectron2.data.build import get_detection_dataset_dicts, worker_init_reset_seed, trivial_batch_collator -from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset - -from .dataset_mapper import AttributeDatasetMapper - - -def build_detection_train_loader_with_attributes(cfg, mapper=None): - num_workers = get_world_size() - images_per_batch = cfg.SOLVER.IMS_PER_BATCH - assert ( - images_per_batch % num_workers == 0 - ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format( - images_per_batch, num_workers - ) - assert ( - images_per_batch >= num_workers - ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format( - images_per_batch, num_workers - ) - images_per_worker = images_per_batch // num_workers - - dataset_dicts = get_detection_dataset_dicts( - cfg.DATASETS.TRAIN, - filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS, - min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE - if cfg.MODEL.KEYPOINT_ON - else 0, - proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None, - ) - dataset = DatasetFromList(dataset_dicts, copy=False) - - if mapper is None: - mapper = AttributeDatasetMapper(cfg, True) - dataset = MapDataset(dataset, mapper) - - sampler_name = cfg.DATALOADER.SAMPLER_TRAIN - logger = logging.getLogger(__name__) - logger.info("Using training sampler {}".format(sampler_name)) - if sampler_name == "TrainingSampler": - sampler = samplers.TrainingSampler(len(dataset)) - elif sampler_name == "RepeatFactorTrainingSampler": - sampler = samplers.RepeatFactorTrainingSampler( - dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD - ) - else: - raise ValueError("Unknown training sampler: {}".format(sampler_name)) - - if cfg.DATALOADER.ASPECT_RATIO_GROUPING: - data_loader = torch.utils.data.DataLoader( - dataset, - sampler=sampler, - num_workers=cfg.DATALOADER.NUM_WORKERS, - batch_sampler=None, - collate_fn=operator.itemgetter(0), - worker_init_fn=worker_init_reset_seed, - ) - data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker) - else: - batch_sampler = torch.utils.data.sampler.BatchSampler( - sampler, images_per_worker, drop_last=True - ) - data_loader = torch.utils.data.DataLoader( - dataset, - num_workers=cfg.DATALOADER.NUM_WORKERS, - batch_sampler=batch_sampler, - collate_fn=trivial_batch_collator, - worker_init_fn=worker_init_reset_seed, - ) - - return data_loader - - -def build_detection_test_loader_with_attributes(cfg, dataset_name, mapper=None): - dataset_dicts = get_detection_dataset_dicts( - [dataset_name], - filter_empty=False, - proposal_files=[ - cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)] - ] - if cfg.MODEL.LOAD_PROPOSALS - else None, - ) - - dataset = DatasetFromList(dataset_dicts) - if mapper is None: - mapper = AttributeDatasetMapper(cfg, False) - dataset = MapDataset(dataset, mapper) - - sampler = samplers.InferenceSampler(len(dataset)) - batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False) - - data_loader = torch.utils.data.DataLoader( - dataset, - num_workers=cfg.DATALOADER.NUM_WORKERS, - batch_sampler=batch_sampler, - collate_fn=trivial_batch_collator, - ) - return data_loader \ No newline at end of file diff --git a/spaces/CVPR/WALT/mmdet/models/backbones/hourglass.py 
b/spaces/CVPR/WALT/mmdet/models/backbones/hourglass.py deleted file mode 100644 index 3422acee35e3c6f8731cdb310f188e671b5be12f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/backbones/hourglass.py +++ /dev/null @@ -1,198 +0,0 @@ -import torch.nn as nn -from mmcv.cnn import ConvModule - -from ..builder import BACKBONES -from ..utils import ResLayer -from .resnet import BasicBlock - - -class HourglassModule(nn.Module): - """Hourglass Module for HourglassNet backbone. - - Generate module recursively and use BasicBlock as the base unit. - - Args: - depth (int): Depth of current HourglassModule. - stage_channels (list[int]): Feature channels of sub-modules in current - and follow-up HourglassModule. - stage_blocks (list[int]): Number of sub-modules stacked in current and - follow-up HourglassModule. - norm_cfg (dict): Dictionary to construct and config norm layer. - """ - - def __init__(self, - depth, - stage_channels, - stage_blocks, - norm_cfg=dict(type='BN', requires_grad=True)): - super(HourglassModule, self).__init__() - - self.depth = depth - - cur_block = stage_blocks[0] - next_block = stage_blocks[1] - - cur_channel = stage_channels[0] - next_channel = stage_channels[1] - - self.up1 = ResLayer( - BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg) - - self.low1 = ResLayer( - BasicBlock, - cur_channel, - next_channel, - cur_block, - stride=2, - norm_cfg=norm_cfg) - - if self.depth > 1: - self.low2 = HourglassModule(depth - 1, stage_channels[1:], - stage_blocks[1:]) - else: - self.low2 = ResLayer( - BasicBlock, - next_channel, - next_channel, - next_block, - norm_cfg=norm_cfg) - - self.low3 = ResLayer( - BasicBlock, - next_channel, - cur_channel, - cur_block, - norm_cfg=norm_cfg, - downsample_first=False) - - self.up2 = nn.Upsample(scale_factor=2) - - def forward(self, x): - """Forward function.""" - up1 = self.up1(x) - low1 = self.low1(x) - low2 = self.low2(low1) - low3 = self.low3(low2) - up2 = self.up2(low3) - return up1 + up2 - - -@BACKBONES.register_module() -class HourglassNet(nn.Module): - """HourglassNet backbone. - - Stacked Hourglass Networks for Human Pose Estimation. - More details can be found in the `paper - `_ . - - Args: - downsample_times (int): Downsample times in a HourglassModule. - num_stacks (int): Number of HourglassModule modules stacked, - 1 for Hourglass-52, 2 for Hourglass-104. - stage_channels (list[int]): Feature channel of each sub-module in a - HourglassModule. - stage_blocks (list[int]): Number of sub-modules stacked in a - HourglassModule. - feat_channel (int): Feature channel of conv after a HourglassModule. - norm_cfg (dict): Dictionary to construct and config norm layer. - - Example: - >>> from mmdet.models import HourglassNet - >>> import torch - >>> self = HourglassNet() - >>> self.eval() - >>> inputs = torch.rand(1, 3, 511, 511) - >>> level_outputs = self.forward(inputs) - >>> for level_output in level_outputs: - ... 
print(tuple(level_output.shape)) - (1, 256, 128, 128) - (1, 256, 128, 128) - """ - - def __init__(self, - downsample_times=5, - num_stacks=2, - stage_channels=(256, 256, 384, 384, 384, 512), - stage_blocks=(2, 2, 2, 2, 2, 4), - feat_channel=256, - norm_cfg=dict(type='BN', requires_grad=True)): - super(HourglassNet, self).__init__() - - self.num_stacks = num_stacks - assert self.num_stacks >= 1 - assert len(stage_channels) == len(stage_blocks) - assert len(stage_channels) > downsample_times - - cur_channel = stage_channels[0] - - self.stem = nn.Sequential( - ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg), - ResLayer(BasicBlock, 128, 256, 1, stride=2, norm_cfg=norm_cfg)) - - self.hourglass_modules = nn.ModuleList([ - HourglassModule(downsample_times, stage_channels, stage_blocks) - for _ in range(num_stacks) - ]) - - self.inters = ResLayer( - BasicBlock, - cur_channel, - cur_channel, - num_stacks - 1, - norm_cfg=norm_cfg) - - self.conv1x1s = nn.ModuleList([ - ConvModule( - cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) - for _ in range(num_stacks - 1) - ]) - - self.out_convs = nn.ModuleList([ - ConvModule( - cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg) - for _ in range(num_stacks) - ]) - - self.remap_convs = nn.ModuleList([ - ConvModule( - feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None) - for _ in range(num_stacks - 1) - ]) - - self.relu = nn.ReLU(inplace=True) - - def init_weights(self, pretrained=None): - """Init module weights. - - We do nothing in this function because all modules we used - (ConvModule, BasicBlock and etc.) have default initialization, and - currently we don't provide pretrained model of HourglassNet. - - Detector's __init__() will call backbone's init_weights() with - pretrained as input, so we keep this function. - """ - # Training Centripetal Model needs to reset parameters for Conv2d - for m in self.modules(): - if isinstance(m, nn.Conv2d): - m.reset_parameters() - - def forward(self, x): - """Forward function.""" - inter_feat = self.stem(x) - out_feats = [] - - for ind in range(self.num_stacks): - single_hourglass = self.hourglass_modules[ind] - out_conv = self.out_convs[ind] - - hourglass_feat = single_hourglass(inter_feat) - out_feat = out_conv(hourglass_feat) - out_feats.append(out_feat) - - if ind < self.num_stacks - 1: - inter_feat = self.conv1x1s[ind]( - inter_feat) + self.remap_convs[ind]( - out_feat) - inter_feat = self.inters[ind](self.relu(inter_feat)) - - return out_feats diff --git a/spaces/CVPR/WALT/walt/apis/__init__.py b/spaces/CVPR/WALT/walt/apis/__init__.py deleted file mode 100644 index bdd0a928ef5b579f84d0ea7946cb0fea3abcf9f0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/walt/apis/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .train import get_root_logger, set_random_seed, train_detector - - -__all__ = [ - 'get_root_logger', 'set_random_seed', 'train_detector' -] diff --git a/spaces/CVPR/WALT/walt/datasets/pipelines/test_time_aug.py b/spaces/CVPR/WALT/walt/datasets/pipelines/test_time_aug.py deleted file mode 100644 index b6226e040499882c99f15594c66ebf3d07829168..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/walt/datasets/pipelines/test_time_aug.py +++ /dev/null @@ -1,119 +0,0 @@ -import warnings - -import mmcv - -from ..builder import PIPELINES -from .compose import Compose - - -@PIPELINES.register_module() -class MultiScaleFlipAug(object): - """Test-time augmentation with multiple scales and flipping. 
- - An example configuration is as followed: - - .. code-block:: - - img_scale=[(1333, 400), (1333, 800)], - flip=True, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ] - - After MultiScaleFLipAug with above configuration, the results are wrapped - into lists of the same length as followed: - - .. code-block:: - - dict( - img=[...], - img_shape=[...], - scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)] - flip=[False, True, False, True] - ... - ) - - Args: - transforms (list[dict]): Transforms to apply in each augmentation. - img_scale (tuple | list[tuple] | None): Images scales for resizing. - scale_factor (float | list[float] | None): Scale factors for resizing. - flip (bool): Whether apply flip augmentation. Default: False. - flip_direction (str | list[str]): Flip augmentation directions, - options are "horizontal" and "vertical". If flip_direction is list, - multiple flip augmentations will be applied. - It has no effect when flip == False. Default: "horizontal". - """ - - def __init__(self, - transforms, - img_scale=None, - scale_factor=None, - flip=False, - flip_direction='horizontal'): - self.transforms = Compose(transforms) - assert (img_scale is None) ^ (scale_factor is None), ( - 'Must have but only one variable can be setted') - if img_scale is not None: - self.img_scale = img_scale if isinstance(img_scale, - list) else [img_scale] - self.scale_key = 'scale' - assert mmcv.is_list_of(self.img_scale, tuple) - else: - self.img_scale = scale_factor if isinstance( - scale_factor, list) else [scale_factor] - self.scale_key = 'scale_factor' - - self.flip = flip - self.flip_direction = flip_direction if isinstance( - flip_direction, list) else [flip_direction] - assert mmcv.is_list_of(self.flip_direction, str) - if not self.flip and self.flip_direction != ['horizontal']: - warnings.warn( - 'flip_direction has no effect when flip is set to False') - if (self.flip - and not any([t['type'] == 'RandomFlip' for t in transforms])): - warnings.warn( - 'flip has no effect when RandomFlip is not in transforms') - - def __call__(self, results): - """Call function to apply test time augment transforms on results. - - Args: - results (dict): Result dict contains the data to transform. - - Returns: - dict[str: list]: The augmented data, where each value is wrapped - into a list. 
- """ - - aug_data = [] - flip_args = [(False, None)] - if self.flip: - flip_args += [(True, direction) - for direction in self.flip_direction] - for scale in self.img_scale: - for flip, direction in flip_args: - _results = results.copy() - _results[self.scale_key] = scale - _results['flip'] = flip - _results['flip_direction'] = direction - data = self.transforms(_results) - aug_data.append(data) - # list of dict to dict of list - aug_data_dict = {key: [] for key in aug_data[0]} - for data in aug_data: - for key, val in data.items(): - aug_data_dict[key].append(val) - return aug_data_dict - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(transforms={self.transforms}, ' - repr_str += f'img_scale={self.img_scale}, flip={self.flip}, ' - repr_str += f'flip_direction={self.flip_direction})' - return repr_str diff --git a/spaces/CVPR/v-doc_abstractive_mac/extract_feature.py b/spaces/CVPR/v-doc_abstractive_mac/extract_feature.py deleted file mode 100644 index ab5f45ba0116a9b8d8862e42e16008171a78ffd2..0000000000000000000000000000000000000000 --- a/spaces/CVPR/v-doc_abstractive_mac/extract_feature.py +++ /dev/null @@ -1,51 +0,0 @@ -import argparse, os, json -import numpy as np -from imageio import imread -from PIL import Image - -import torch -import torchvision -import ssl -ssl._create_default_https_context = ssl._create_unverified_context - - -def build_model(model='resnet101', model_stage=3): - cnn = getattr(torchvision.models, model)(pretrained=True) - layers = [ - cnn.conv1, - cnn.bn1, - cnn.relu, - cnn.maxpool, - ] - for i in range(model_stage): - name = 'layer%d' % (i + 1) - layers.append(getattr(cnn, name)) - model = torch.nn.Sequential(*layers) - # model.cuda() - model.eval() - return model - - -def run_image(img, model): - mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1) - std = np.array([0.229, 0.224, 0.224]).reshape(1, 3, 1, 1) - - image = np.concatenate([img], 0).astype(np.float32) - image = (image / 255.0 - mean) / std - image = torch.FloatTensor(image) - image = torch.autograd.Variable(image, volatile=True) - - feats = model(image) - feats = feats.data.cpu().clone().numpy() - - return feats - - -def get_img_feat(cnn_model, img, image_height=224, image_width=224): - img_size = (image_height, image_width) - img = np.array(Image.fromarray(np.uint8(img)).resize(img_size)) - img = img.transpose(2, 0, 1)[None] - feats = run_image(img, cnn_model) - _, C, H, W = feats.shape - feat_dset = feats.reshape(1, C, H, W) - return feat_dset diff --git a/spaces/Cat125/text-generator-v3/README.md b/spaces/Cat125/text-generator-v3/README.md deleted file mode 100644 index be28c1a5cb518fdc324f0483f242b230bea0583b..0000000000000000000000000000000000000000 --- a/spaces/Cat125/text-generator-v3/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Text Generator v3 -emoji: 🐨 -colorFrom: blue -colorTo: indigo -sdk: gradio -sdk_version: 3.27.0 -app_file: main.py -pinned: true -license: openrail -duplicated_from: Cat125/text-generator-v2 ---- - -This tool allows you to generate texts based on given context. 
\ No newline at end of file diff --git a/spaces/CognitiveLabs/Research-Assistant/agent/llm_utils.py b/spaces/CognitiveLabs/Research-Assistant/agent/llm_utils.py deleted file mode 100644 index 0810a66ba41d76e48c4afe3d123b266f902b2e10..0000000000000000000000000000000000000000 --- a/spaces/CognitiveLabs/Research-Assistant/agent/llm_utils.py +++ /dev/null @@ -1,39 +0,0 @@ -from __future__ import annotations -from config import Config -import openai - -CFG = Config() - -openai.api_key = CFG.openai_api_key -openai.api_base = CFG.openai_api_base - -from typing import Optional - -def llm_response(model, - messages, - temperature: float = CFG.temperature, - max_tokens: Optional[int] = None): - return openai.ChatCompletion.create( - model=model, - messages=messages, - temperature=temperature, - max_tokens=max_tokens, - ).choices[0].message["content"] - - -def llm_stream_response(model, - messages, - temperature: float = CFG.temperature, - max_tokens: Optional[int] = None): - response = "" - for chunk in openai.ChatCompletion.create( - model=model, - messages=messages, - temperature=temperature, - max_tokens=max_tokens, - stream=True, - ): - content = chunk["choices"][0].get("delta", {}).get("content") - if content is not None: - response += content - yield response diff --git "a/spaces/Cong723/gpt-academic-public/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" "b/spaces/Cong723/gpt-academic-public/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" deleted file mode 100644 index 3da831fd07e361a532777c83bb02cff265b94abd..0000000000000000000000000000000000000000 --- "a/spaces/Cong723/gpt-academic-public/crazy_functions/\344\270\213\350\275\275arxiv\350\256\272\346\226\207\347\277\273\350\257\221\346\221\230\350\246\201.py" +++ /dev/null @@ -1,194 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file, get_conf -import re, requests, unicodedata, os -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -def download_arxiv_(url_pdf): - if 'arxiv.org' not in url_pdf: - if ('.' 
in url_pdf) and ('/' not in url_pdf): - new_url = 'https://arxiv.org/abs/'+url_pdf - print('下载编号:', url_pdf, '自动定位:', new_url) - # download_arxiv_(new_url) - return download_arxiv_(new_url) - else: - print('不能识别的URL!') - return None - if 'abs' in url_pdf: - url_pdf = url_pdf.replace('abs', 'pdf') - url_pdf = url_pdf + '.pdf' - - url_abs = url_pdf.replace('.pdf', '').replace('pdf', 'abs') - title, other_info = get_name(_url_=url_abs) - - paper_id = title.split()[0] # '[1712.00559]' - if '2' in other_info['year']: - title = other_info['year'] + ' ' + title - - known_conf = ['NeurIPS', 'NIPS', 'Nature', 'Science', 'ICLR', 'AAAI'] - for k in known_conf: - if k in other_info['comment']: - title = k + ' ' + title - - download_dir = './gpt_log/arxiv/' - os.makedirs(download_dir, exist_ok=True) - - title_str = title.replace('?', '?')\ - .replace(':', ':')\ - .replace('\"', '“')\ - .replace('\n', '')\ - .replace(' ', ' ')\ - .replace(' ', ' ') - - requests_pdf_url = url_pdf - file_path = download_dir+title_str - # if os.path.exists(file_path): - # print('返回缓存文件') - # return './gpt_log/arxiv/'+title_str - - print('下载中') - proxies, = get_conf('proxies') - r = requests.get(requests_pdf_url, proxies=proxies) - with open(file_path, 'wb+') as f: - f.write(r.content) - print('下载完成') - - # print('输出下载命令:','aria2c -o \"%s\" %s'%(title_str,url_pdf)) - # subprocess.call('aria2c --all-proxy=\"172.18.116.150:11084\" -o \"%s\" %s'%(download_dir+title_str,url_pdf), shell=True) - - x = "%s %s %s.bib" % (paper_id, other_info['year'], other_info['authors']) - x = x.replace('?', '?')\ - .replace(':', ':')\ - .replace('\"', '“')\ - .replace('\n', '')\ - .replace(' ', ' ')\ - .replace(' ', ' ') - return './gpt_log/arxiv/'+title_str, other_info - - -def get_name(_url_): - import os - from bs4 import BeautifulSoup - print('正在获取文献名!') - print(_url_) - - # arxiv_recall = {} - # if os.path.exists('./arxiv_recall.pkl'): - # with open('./arxiv_recall.pkl', 'rb') as f: - # arxiv_recall = pickle.load(f) - - # if _url_ in arxiv_recall: - # print('在缓存中') - # return arxiv_recall[_url_] - - proxies, = get_conf('proxies') - res = requests.get(_url_, proxies=proxies) - - bs = BeautifulSoup(res.text, 'html.parser') - other_details = {} - - # get year - try: - year = bs.find_all(class_='dateline')[0].text - year = re.search(r'(\d{4})', year, re.M | re.I).group(1) - other_details['year'] = year - abstract = bs.find_all(class_='abstract mathjax')[0].text - other_details['abstract'] = abstract - except: - other_details['year'] = '' - print('年份获取失败') - - # get author - try: - authors = bs.find_all(class_='authors')[0].text - authors = authors.split('Authors:')[1] - other_details['authors'] = authors - except: - other_details['authors'] = '' - print('authors获取失败') - - # get comment - try: - comment = bs.find_all(class_='metatable')[0].text - real_comment = None - for item in comment.replace('\n', ' ').split(' '): - if 'Comments' in item: - real_comment = item - if real_comment is not None: - other_details['comment'] = real_comment - else: - other_details['comment'] = '' - except: - other_details['comment'] = '' - print('年份获取失败') - - title_str = BeautifulSoup( - res.text, 'html.parser').find('title').contents[0] - print('获取成功:', title_str) - # arxiv_recall[_url_] = (title_str+'.pdf', other_details) - # with open('./arxiv_recall.pkl', 'wb') as f: - # pickle.dump(arxiv_recall, f) - - return title_str+'.pdf', other_details - - - -@CatchException -def 下载arxiv论文并翻译摘要(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - - 
CRAZY_FUNCTION_INFO = "下载arxiv论文并翻译摘要,函数插件作者[binary-husky]。正在提取摘要并下载PDF文档……" - import glob - import os - - # 基本信息:功能、贡献者 - chatbot.append(["函数插件功能?", CRAZY_FUNCTION_INFO]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import pdfminer, bs4 - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 清空历史,以免输入溢出 - history = [] - - # 提取摘要,下载PDF文档 - try: - pdf_path, info = download_arxiv_(txt) - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"下载pdf文件未成功") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - # 翻译摘要等 - i_say = f"请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。材料如下:{str(info)}" - i_say_show_user = f'请你阅读以下学术论文相关的材料,提取摘要,翻译为中文。论文:{pdf_path}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - msg = '正常' - # ** gpt request ** - # 单线,获取文章meta信息 - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, history=[], - sys_prompt="Your job is to collect information from materials and translate to Chinese。", - ) - - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - # 写入文件 - import shutil - # 重置文件的创建时间 - shutil.copyfile(pdf_path, f'./gpt_log/{os.path.basename(pdf_path)}'); os.remove(pdf_path) - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res + "\n\nPDF文件也已经下载")) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/streams/stapled.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/streams/stapled.py deleted file mode 100644 index 1b2862e3eac2ae6f18212d312e7cc7c2acdf0c5c..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/streams/stapled.py +++ /dev/null @@ -1,140 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from typing import Any, Callable, Generic, Mapping, Sequence, TypeVar - -from ..abc import ( - ByteReceiveStream, - ByteSendStream, - ByteStream, - Listener, - ObjectReceiveStream, - ObjectSendStream, - ObjectStream, - TaskGroup, -) - -T_Item = TypeVar("T_Item") -T_Stream = TypeVar("T_Stream") - - -@dataclass(eq=False) -class StapledByteStream(ByteStream): - """ - Combines two byte streams into a single, bidirectional byte stream. - - Extra attributes will be provided from both streams, with the receive stream providing the - values in case of a conflict. 
- - :param ByteSendStream send_stream: the sending byte stream - :param ByteReceiveStream receive_stream: the receiving byte stream - """ - - send_stream: ByteSendStream - receive_stream: ByteReceiveStream - - async def receive(self, max_bytes: int = 65536) -> bytes: - return await self.receive_stream.receive(max_bytes) - - async def send(self, item: bytes) -> None: - await self.send_stream.send(item) - - async def send_eof(self) -> None: - await self.send_stream.aclose() - - async def aclose(self) -> None: - await self.send_stream.aclose() - await self.receive_stream.aclose() - - @property - def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: - return { - **self.send_stream.extra_attributes, - **self.receive_stream.extra_attributes, - } - - -@dataclass(eq=False) -class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]): - """ - Combines two object streams into a single, bidirectional object stream. - - Extra attributes will be provided from both streams, with the receive stream providing the - values in case of a conflict. - - :param ObjectSendStream send_stream: the sending object stream - :param ObjectReceiveStream receive_stream: the receiving object stream - """ - - send_stream: ObjectSendStream[T_Item] - receive_stream: ObjectReceiveStream[T_Item] - - async def receive(self) -> T_Item: - return await self.receive_stream.receive() - - async def send(self, item: T_Item) -> None: - await self.send_stream.send(item) - - async def send_eof(self) -> None: - await self.send_stream.aclose() - - async def aclose(self) -> None: - await self.send_stream.aclose() - await self.receive_stream.aclose() - - @property - def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: - return { - **self.send_stream.extra_attributes, - **self.receive_stream.extra_attributes, - } - - -@dataclass(eq=False) -class MultiListener(Generic[T_Stream], Listener[T_Stream]): - """ - Combines multiple listeners into one, serving connections from all of them at once. - - Any MultiListeners in the given collection of listeners will have their listeners moved into - this one. - - Extra attributes are provided from each listener, with each successive listener overriding any - conflicting attributes from the previous one. - - :param listeners: listeners to serve - :type listeners: Sequence[Listener[T_Stream]] - """ - - listeners: Sequence[Listener[T_Stream]] - - def __post_init__(self) -> None: - listeners: list[Listener[T_Stream]] = [] - for listener in self.listeners: - if isinstance(listener, MultiListener): - listeners.extend(listener.listeners) - del listener.listeners[:] # type: ignore[attr-defined] - else: - listeners.append(listener) - - self.listeners = listeners - - async def serve( - self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None - ) -> None: - from .. 
import create_task_group - - async with create_task_group() as tg: - for listener in self.listeners: - tg.start_soon(listener.serve, handler, task_group) - - async def aclose(self) -> None: - for listener in self.listeners: - await listener.aclose() - - @property - def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]: - attributes: dict = {} - for listener in self.listeners: - attributes.update(listener.extra_attributes) - - return attributes diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_auth.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_auth.py deleted file mode 100644 index 1d7385d57334c46750d0618a407b49cd829856f4..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_auth.py +++ /dev/null @@ -1,347 +0,0 @@ -import hashlib -import netrc -import os -import re -import time -import typing -from base64 import b64encode -from urllib.request import parse_http_list - -from ._exceptions import ProtocolError -from ._models import Request, Response -from ._utils import to_bytes, to_str, unquote - -if typing.TYPE_CHECKING: # pragma: no cover - from hashlib import _Hash - - -class Auth: - """ - Base class for all authentication schemes. - - To implement a custom authentication scheme, subclass `Auth` and override - the `.auth_flow()` method. - - If the authentication scheme does I/O such as disk access or network calls, or uses - synchronization primitives such as locks, you should override `.sync_auth_flow()` - and/or `.async_auth_flow()` instead of `.auth_flow()` to provide specialized - implementations that will be used by `Client` and `AsyncClient` respectively. - """ - - requires_request_body = False - requires_response_body = False - - def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: - """ - Execute the authentication flow. - - To dispatch a request, `yield` it: - - ``` - yield request - ``` - - The client will `.send()` the response back into the flow generator. You can - access it like so: - - ``` - response = yield request - ``` - - A `return` (or reaching the end of the generator) will result in the - client returning the last response obtained from the server. - - You can dispatch as many requests as is necessary. - """ - yield request - - def sync_auth_flow( - self, request: Request - ) -> typing.Generator[Request, Response, None]: - """ - Execute the authentication flow synchronously. - - By default, this defers to `.auth_flow()`. You should override this method - when the authentication scheme does I/O and/or uses concurrency primitives. - """ - if self.requires_request_body: - request.read() - - flow = self.auth_flow(request) - request = next(flow) - - while True: - response = yield request - if self.requires_response_body: - response.read() - - try: - request = flow.send(response) - except StopIteration: - break - - async def async_auth_flow( - self, request: Request - ) -> typing.AsyncGenerator[Request, Response]: - """ - Execute the authentication flow asynchronously. - - By default, this defers to `.auth_flow()`. You should override this method - when the authentication scheme does I/O and/or uses concurrency primitives. 
- """ - if self.requires_request_body: - await request.aread() - - flow = self.auth_flow(request) - request = next(flow) - - while True: - response = yield request - if self.requires_response_body: - await response.aread() - - try: - request = flow.send(response) - except StopIteration: - break - - -class FunctionAuth(Auth): - """ - Allows the 'auth' argument to be passed as a simple callable function, - that takes the request, and returns a new, modified request. - """ - - def __init__(self, func: typing.Callable[[Request], Request]) -> None: - self._func = func - - def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: - yield self._func(request) - - -class BasicAuth(Auth): - """ - Allows the 'auth' argument to be passed as a (username, password) pair, - and uses HTTP Basic authentication. - """ - - def __init__( - self, username: typing.Union[str, bytes], password: typing.Union[str, bytes] - ): - self._auth_header = self._build_auth_header(username, password) - - def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: - request.headers["Authorization"] = self._auth_header - yield request - - def _build_auth_header( - self, username: typing.Union[str, bytes], password: typing.Union[str, bytes] - ) -> str: - userpass = b":".join((to_bytes(username), to_bytes(password))) - token = b64encode(userpass).decode() - return f"Basic {token}" - - -class NetRCAuth(Auth): - """ - Use a 'netrc' file to lookup basic auth credentials based on the url host. - """ - - def __init__(self, file: typing.Optional[str] = None): - self._netrc_info = netrc.netrc(file) - - def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: - auth_info = self._netrc_info.authenticators(request.url.host) - if auth_info is None or not auth_info[2]: - # The netrc file did not have authentication credentials for this host. - yield request - else: - # Build a basic auth header with credentials from the netrc file. - request.headers["Authorization"] = self._build_auth_header( - username=auth_info[0], password=auth_info[2] - ) - yield request - - def _build_auth_header( - self, username: typing.Union[str, bytes], password: typing.Union[str, bytes] - ) -> str: - userpass = b":".join((to_bytes(username), to_bytes(password))) - token = b64encode(userpass).decode() - return f"Basic {token}" - - -class DigestAuth(Auth): - _ALGORITHM_TO_HASH_FUNCTION: typing.Dict[str, typing.Callable[[bytes], "_Hash"]] = { - "MD5": hashlib.md5, - "MD5-SESS": hashlib.md5, - "SHA": hashlib.sha1, - "SHA-SESS": hashlib.sha1, - "SHA-256": hashlib.sha256, - "SHA-256-SESS": hashlib.sha256, - "SHA-512": hashlib.sha512, - "SHA-512-SESS": hashlib.sha512, - } - - def __init__( - self, username: typing.Union[str, bytes], password: typing.Union[str, bytes] - ) -> None: - self._username = to_bytes(username) - self._password = to_bytes(password) - self._last_challenge: typing.Optional[_DigestAuthChallenge] = None - self._nonce_count = 1 - - def auth_flow(self, request: Request) -> typing.Generator[Request, Response, None]: - if self._last_challenge: - request.headers["Authorization"] = self._build_auth_header( - request, self._last_challenge - ) - - response = yield request - - if response.status_code != 401 or "www-authenticate" not in response.headers: - # If the response is not a 401 then we don't - # need to build an authenticated request. 
- return - - for auth_header in response.headers.get_list("www-authenticate"): - if auth_header.lower().startswith("digest "): - break - else: - # If the response does not include a 'WWW-Authenticate: Digest ...' - # header, then we don't need to build an authenticated request. - return - - self._last_challenge = self._parse_challenge(request, response, auth_header) - self._nonce_count = 1 - - request.headers["Authorization"] = self._build_auth_header( - request, self._last_challenge - ) - yield request - - def _parse_challenge( - self, request: Request, response: Response, auth_header: str - ) -> "_DigestAuthChallenge": - """ - Returns a challenge from a Digest WWW-Authenticate header. - These take the form of: - `Digest realm="realm@host.com",qop="auth,auth-int",nonce="abc",opaque="xyz"` - """ - scheme, _, fields = auth_header.partition(" ") - - # This method should only ever have been called with a Digest auth header. - assert scheme.lower() == "digest" - - header_dict: typing.Dict[str, str] = {} - for field in parse_http_list(fields): - key, value = field.strip().split("=", 1) - header_dict[key] = unquote(value) - - try: - realm = header_dict["realm"].encode() - nonce = header_dict["nonce"].encode() - algorithm = header_dict.get("algorithm", "MD5") - opaque = header_dict["opaque"].encode() if "opaque" in header_dict else None - qop = header_dict["qop"].encode() if "qop" in header_dict else None - return _DigestAuthChallenge( - realm=realm, nonce=nonce, algorithm=algorithm, opaque=opaque, qop=qop - ) - except KeyError as exc: - message = "Malformed Digest WWW-Authenticate header" - raise ProtocolError(message, request=request) from exc - - def _build_auth_header( - self, request: Request, challenge: "_DigestAuthChallenge" - ) -> str: - hash_func = self._ALGORITHM_TO_HASH_FUNCTION[challenge.algorithm.upper()] - - def digest(data: bytes) -> bytes: - return hash_func(data).hexdigest().encode() - - A1 = b":".join((self._username, challenge.realm, self._password)) - - path = request.url.raw_path - A2 = b":".join((request.method.encode(), path)) - # TODO: implement auth-int - HA2 = digest(A2) - - nc_value = b"%08x" % self._nonce_count - cnonce = self._get_client_nonce(self._nonce_count, challenge.nonce) - self._nonce_count += 1 - - HA1 = digest(A1) - if challenge.algorithm.lower().endswith("-sess"): - HA1 = digest(b":".join((HA1, challenge.nonce, cnonce))) - - qop = self._resolve_qop(challenge.qop, request=request) - if qop is None: - digest_data = [HA1, challenge.nonce, HA2] - else: - digest_data = [challenge.nonce, nc_value, cnonce, qop, HA2] - key_digest = b":".join(digest_data) - - format_args = { - "username": self._username, - "realm": challenge.realm, - "nonce": challenge.nonce, - "uri": path, - "response": digest(b":".join((HA1, key_digest))), - "algorithm": challenge.algorithm.encode(), - } - if challenge.opaque: - format_args["opaque"] = challenge.opaque - if qop: - format_args["qop"] = b"auth" - format_args["nc"] = nc_value - format_args["cnonce"] = cnonce - - return "Digest " + self._get_header_value(format_args) - - def _get_client_nonce(self, nonce_count: int, nonce: bytes) -> bytes: - s = str(nonce_count).encode() - s += nonce - s += time.ctime().encode() - s += os.urandom(8) - - return hashlib.sha1(s).hexdigest()[:16].encode() - - def _get_header_value(self, header_fields: typing.Dict[str, bytes]) -> str: - NON_QUOTED_FIELDS = ("algorithm", "qop", "nc") - QUOTED_TEMPLATE = '{}="{}"' - NON_QUOTED_TEMPLATE = "{}={}" - - header_value = "" - for i, (field, value) in 
enumerate(header_fields.items()): - if i > 0: - header_value += ", " - template = ( - QUOTED_TEMPLATE - if field not in NON_QUOTED_FIELDS - else NON_QUOTED_TEMPLATE - ) - header_value += template.format(field, to_str(value)) - - return header_value - - def _resolve_qop( - self, qop: typing.Optional[bytes], request: Request - ) -> typing.Optional[bytes]: - if qop is None: - return None - qops = re.split(b", ?", qop) - if b"auth" in qops: - return b"auth" - - if qops == [b"auth-int"]: - raise NotImplementedError("Digest auth-int support is not yet implemented") - - message = f'Unexpected qop value "{qop!r}" in digest auth' - raise ProtocolError(message, request=request) - - -class _DigestAuthChallenge(typing.NamedTuple): - realm: bytes - nonce: bytes - algorithm: str - opaque: typing.Optional[bytes] - qop: typing.Optional[bytes] diff --git a/spaces/Dantra1/CeliaSensei/attentions.py b/spaces/Dantra1/CeliaSensei/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/Dantra1/CeliaSensei/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, 
proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." 
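            # The three calls below add learned relative-position information to
            # the content-based attention scores:
            #   1. _get_relative_embeddings pads/slices emb_rel_k so that only the
            #      2*t_s - 1 offsets valid for the current length remain;
            #   2. _matmul_with_relative_keys multiplies the scaled queries with those
            #      embeddings, giving logits of shape [b, n_h, t_t, 2*t_s - 1];
            #   3. _relative_position_to_absolute_position re-indexes those logits to
            #      [b, n_h, t_t, t_s] so they can be summed with `scores`.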
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/Dauzy/whisper-webui/src/whisper/fasterWhisperContainer.py b/spaces/Dauzy/whisper-webui/src/whisper/fasterWhisperContainer.py deleted file mode 100644 index 5bd640eeba90f7ad2c6a2795ed14e40d30e90c4c..0000000000000000000000000000000000000000 --- a/spaces/Dauzy/whisper-webui/src/whisper/fasterWhisperContainer.py +++ /dev/null @@ -1,207 +0,0 @@ -import os -from typing import List, Union - -from faster_whisper import WhisperModel, download_model -from src.config import ModelConfig, VadInitialPromptMode -from src.hooks.progressListener import ProgressListener -from src.languages import get_language_from_name -from src.modelCache import ModelCache -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy -from src.whisper.abstractWhisperContainer import AbstractWhisperCallback, AbstractWhisperContainer -from src.utils import format_timestamp - -class FasterWhisperContainer(AbstractWhisperContainer): - def __init__(self, model_name: str, device: str = None, compute_type: str = "float16", - download_root: str = None, - cache: ModelCache = None, models: 
List[ModelConfig] = []): - super().__init__(model_name, device, compute_type, download_root, cache, models) - - def ensure_downloaded(self): - """ - Ensure that the model is downloaded. This is useful if you want to ensure that the model is downloaded before - passing the container to a subprocess. - """ - model_config = self._get_model_config() - - if os.path.isdir(model_config.url): - model_config.path = model_config.url - else: - model_config.path = download_model(model_config.url, output_dir=self.download_root) - - def _get_model_config(self) -> ModelConfig: - """ - Get the model configuration for the model. - """ - for model in self.models: - if model.name == self.model_name: - return model - return None - - def _create_model(self): - print("Loading faster whisper model " + self.model_name + " for device " + str(self.device)) - model_config = self._get_model_config() - model_url = model_config.url - - if model_config.type == "whisper": - if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]: - raise Exception("FasterWhisperContainer does not yet support Whisper models. Use ct2-transformers-converter to convert the model to a faster-whisper model.") - if model_url == "large": - # large is an alias for large-v1 - model_url = "large-v1" - - device = self.device - - if (device is None): - device = "auto" - - model = WhisperModel(model_url, device=device, compute_type=self.compute_type) - return model - - def create_callback(self, language: str = None, task: str = None, - prompt_strategy: AbstractPromptStrategy = None, - **decodeOptions: dict) -> AbstractWhisperCallback: - """ - Create a WhisperCallback object that can be used to transcript audio files. - - Parameters - ---------- - language: str - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - prompt_strategy: AbstractPromptStrategy - The prompt strategy to use. If not specified, the prompt from Whisper will be used. - decodeOptions: dict - Additional options to pass to the decoder. Must be pickleable. - - Returns - ------- - A WhisperCallback object. - """ - return FasterWhisperCallback(self, language=language, task=task, prompt_strategy=prompt_strategy, **decodeOptions) - -class FasterWhisperCallback(AbstractWhisperCallback): - def __init__(self, model_container: FasterWhisperContainer, language: str = None, task: str = None, - prompt_strategy: AbstractPromptStrategy = None, - **decodeOptions: dict): - self.model_container = model_container - self.language = language - self.task = task - self.prompt_strategy = prompt_strategy - self.decodeOptions = decodeOptions - - self._printed_warning = False - - def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None): - """ - Peform the transcription of the given audio file or data. - - Parameters - ---------- - audio: Union[str, np.ndarray, torch.Tensor] - The audio file to transcribe, or the audio data as a numpy array or torch tensor. - segment_index: int - The target language of the transcription. If not specified, the language will be inferred from the audio content. - task: str - The task - either translate or transcribe. - progress_listener: ProgressListener - A callback to receive progress updates. 
- """ - model: WhisperModel = self.model_container.get_model() - language_code = self._lookup_language_code(self.language) if self.language else None - - # Copy decode options and remove options that are not supported by faster-whisper - decodeOptions = self.decodeOptions.copy() - verbose = decodeOptions.pop("verbose", None) - - logprob_threshold = decodeOptions.pop("logprob_threshold", None) - - patience = decodeOptions.pop("patience", None) - length_penalty = decodeOptions.pop("length_penalty", None) - suppress_tokens = decodeOptions.pop("suppress_tokens", None) - - if (decodeOptions.pop("fp16", None) is not None): - if not self._printed_warning: - print("WARNING: fp16 option is ignored by faster-whisper - use compute_type instead.") - self._printed_warning = True - - # Fix up decode options - if (logprob_threshold is not None): - decodeOptions["log_prob_threshold"] = logprob_threshold - - decodeOptions["patience"] = float(patience) if patience is not None else 1.0 - decodeOptions["length_penalty"] = float(length_penalty) if length_penalty is not None else 1.0 - - # See if supress_tokens is a string - if so, convert it to a list of ints - decodeOptions["suppress_tokens"] = self._split_suppress_tokens(suppress_tokens) - - initial_prompt = self.prompt_strategy.get_segment_prompt(segment_index, prompt, detected_language) \ - if self.prompt_strategy else prompt - - segments_generator, info = model.transcribe(audio, \ - language=language_code if language_code else detected_language, task=self.task, \ - initial_prompt=initial_prompt, \ - **decodeOptions - ) - - segments = [] - - for segment in segments_generator: - segments.append(segment) - - if progress_listener is not None: - progress_listener.on_progress(segment.end, info.duration) - if verbose: - print("[{}->{}] {}".format(format_timestamp(segment.start, True), format_timestamp(segment.end, True), - segment.text)) - - text = " ".join([segment.text for segment in segments]) - - # Convert the segments to a format that is easier to serialize - whisper_segments = [{ - "text": segment.text, - "start": segment.start, - "end": segment.end, - - # Extra fields added by faster-whisper - "words": [{ - "start": word.start, - "end": word.end, - "word": word.word, - "probability": word.probability - } for word in (segment.words if segment.words is not None else []) ] - } for segment in segments] - - result = { - "segments": whisper_segments, - "text": text, - "language": info.language if info else None, - - # Extra fields added by faster-whisper - "language_probability": info.language_probability if info else None, - "duration": info.duration if info else None - } - - # If we have a prompt strategy, we need to increment the current prompt - if self.prompt_strategy: - self.prompt_strategy.on_segment_finished(segment_index, prompt, detected_language, result) - - if progress_listener is not None: - progress_listener.on_finished() - return result - - def _split_suppress_tokens(self, suppress_tokens: Union[str, List[int]]): - if (suppress_tokens is None): - return None - if (isinstance(suppress_tokens, list)): - return suppress_tokens - - return [int(token) for token in suppress_tokens.split(",")] - - def _lookup_language_code(self, language: str): - language = get_language_from_name(language) - - if language is None: - raise ValueError("Invalid language: " + language) - - return language.code diff --git a/spaces/Detomo/ai-comic-generation/src/app/ocr.tsx b/spaces/Detomo/ai-comic-generation/src/app/ocr.tsx deleted file mode 100644 index 
609fd98930e67163d3044d544b6b9b312703937e..0000000000000000000000000000000000000000 --- a/spaces/Detomo/ai-comic-generation/src/app/ocr.tsx +++ /dev/null @@ -1,3 +0,0 @@ -"use client" - -import { createWorker } from "tesseract.js" \ No newline at end of file diff --git a/spaces/Djacon/emotion_detection/files/css/main.css b/spaces/Djacon/emotion_detection/files/css/main.css deleted file mode 100644 index 9cbd41b68ea6504bc68dac73eaf96122d0d53384..0000000000000000000000000000000000000000 --- a/spaces/Djacon/emotion_detection/files/css/main.css +++ /dev/null @@ -1,2451 +0,0 @@ -@import url(https://fonts.googleapis.com/css?family=Roboto:300,400,500,700,900&display=swap); @import url(https://fonts.googleapis.com/css2?family=Cairo:wght@200;300;400;600;700;900&display=swap); /*! tailwindcss v3.1.6 | MIT License | https://tailwindcss.com*/ -*,:after,:before { - border: 0 solid #e5e7eb; - box-sizing: border-box -} - -:after,:before { - --tw-content: "" -} - -html { - -webkit-text-size-adjust: 100%; - font-family: ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji; - line-height: 1.5; - -moz-tab-size: 4; - -o-tab-size: 4; - tab-size: 4 -} - -body { - line-height: inherit; - margin: 0 -} - -hr { - border-top-width: 1px; - color: inherit; - height: 0 -} - -abbr:where([title]) { - -webkit-text-decoration: underline dotted; - text-decoration: underline dotted -} - -h1,h2,h3,h4,h5,h6 { - font-size: inherit; - font-weight: inherit -} - -a { - color: inherit; - text-decoration: inherit -} - -b,strong { - font-weight: bolder -} - -code,kbd,pre,samp { - font-family: ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace; - font-size: 1em -} - -small { - font-size: 80% -} - -sub,sup { - font-size: 75%; - line-height: 0; - position: relative; - vertical-align: baseline -} - -sub { - bottom: -.25em -} - -sup { - top: -.5em -} - -table { - border-collapse: collapse; - border-color: inherit; - text-indent: 0 -} - -button,input,optgroup,select,textarea { - color: inherit; - font-family: inherit; - font-size: 100%; - font-weight: inherit; - line-height: inherit; - margin: 0; - padding: 0 -} - -button,select { - text-transform: none -} - -[type=button],[type=reset],[type=submit],button { - -webkit-appearance: button; - background-color: transparent; - background-image: none -} - -:-moz-focusring { - outline: auto -} - -:-moz-ui-invalid { - box-shadow: none -} - -progress { - vertical-align: baseline -} - -::-webkit-inner-spin-button,::-webkit-outer-spin-button { - height: auto -} - -[type=search] { - -webkit-appearance: textfield; - outline-offset: -2px -} - -::-webkit-search-decoration { - -webkit-appearance: none -} - -::-webkit-file-upload-button { - -webkit-appearance: button; - font: inherit -} - -summary { - display: list-item -} - -blockquote,dd,dl,figure,h1,h2,h3,h4,h5,h6,hr,p,pre { - margin: 0 -} - -fieldset { - margin: 0 -} - -fieldset,legend { - padding: 0 -} - -menu,ol,ul { - list-style: none; - margin: 0; - padding: 0 -} - -textarea { - resize: vertical -} - -input::-moz-placeholder,textarea::-moz-placeholder { - color: #9ca3af; - opacity: 1 -} - -input::placeholder,textarea::placeholder { - color: #9ca3af; - opacity: 1 -} - -[role=button],button { - cursor: pointer -} - -:disabled { - cursor: default -} - -audio,canvas,embed,iframe,img,object,svg,video { - display: block; - vertical-align: middle -} - -img,video { - height: auto; - max-width: 
100% -} - -[multiple],[type=date],[type=datetime-local],[type=email],[type=month],[type=number],[type=password],[type=search],[type=tel],[type=text],[type=time],[type=url],[type=week],select,textarea { - --tw-shadow: 0 0 #0000; - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; - background-color: #fff; - border-color: #6b7280; - border-radius: 0; - border-width: 1px; - font-size: 1rem; - line-height: 1.5rem; - padding: .5rem .75rem -} - -[multiple]:focus,[type=date]:focus,[type=datetime-local]:focus,[type=email]:focus,[type=month]:focus,[type=number]:focus,[type=password]:focus,[type=search]:focus,[type=tel]:focus,[type=text]:focus,[type=time]:focus,[type=url]:focus,[type=week]:focus,select:focus,textarea:focus { - --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/); - --tw-ring-offset-width: 0px; - --tw-ring-offset-color: #fff; - --tw-ring-color: #2563eb; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color); - border-color: #2563eb; - box-shadow: var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow); - outline: 2px solid transparent; - outline-offset: 2px -} - -input::-moz-placeholder,textarea::-moz-placeholder { - color: #6b7280; - opacity: 1 -} - -input::placeholder,textarea::placeholder { - color: #6b7280; - opacity: 1 -} - -::-webkit-datetime-edit-fields-wrapper { - padding: 0 -} - -::-webkit-date-and-time-value { - min-height: 1.5em -} - -::-webkit-datetime-edit,::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-meridiem-field,::-webkit-datetime-edit-millisecond-field,::-webkit-datetime-edit-minute-field,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-second-field,::-webkit-datetime-edit-year-field { - padding-bottom: 0; - padding-top: 0 -} - -select { - -webkit-print-color-adjust: exact; - print-color-adjust: exact; - background-image: url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3E%3Cpath stroke='%236b7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='m6 8 4 4 4-4'/%3E%3C/svg%3E"); - background-position: right .5rem center; - background-repeat: no-repeat; - background-size: 1.5em 1.5em; - color-adjust: exact; - padding-right: 2.5rem -} - -[multiple] { - -webkit-print-color-adjust: unset; - print-color-adjust: unset; - background-image: none; - background-position: 0 0; - background-repeat: unset; - background-size: initial; - color-adjust: unset; - padding-right: .75rem -} - -[type=checkbox],[type=radio] { - -webkit-print-color-adjust: exact; - print-color-adjust: exact; - --tw-shadow: 0 0 #0000; - -webkit-appearance: none; - -moz-appearance: none; - appearance: none; - background-color: #fff; - background-origin: border-box; - border-color: #6b7280; - border-width: 1px; - color: #2563eb; - color-adjust: exact; - display: inline-block; - flex-shrink: 0; - height: 1rem; - padding: 0; - -webkit-user-select: none; - -moz-user-select: none; - user-select: none; - vertical-align: middle; - width: 1rem -} - -[type=checkbox] { - border-radius: 0 -} - -[type=radio] { - border-radius: 100% -} - -[type=checkbox]:focus,[type=radio]:focus { - --tw-ring-inset: var(--tw-empty,/*!*/ /*!*/); - --tw-ring-offset-width: 2px; - --tw-ring-offset-color: #fff; - --tw-ring-color: #2563eb; - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) 
var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color); - box-shadow: var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow); - outline: 2px solid transparent; - outline-offset: 2px -} - -[type=checkbox]:checked,[type=radio]:checked { - background-color: currentColor; - background-position: 50%; - background-repeat: no-repeat; - background-size: 100% 100%; - border-color: transparent -} - -[type=checkbox]:checked { - background-image: url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 16 16' fill='%23fff' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.207 4.793a1 1 0 0 1 0 1.414l-5 5a1 1 0 0 1-1.414 0l-2-2a1 1 0 0 1 1.414-1.414L6.5 9.086l4.293-4.293a1 1 0 0 1 1.414 0z'/%3E%3C/svg%3E") -} - -[type=radio]:checked { - background-image: url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 16 16' fill='%23fff' xmlns='http://www.w3.org/2000/svg'%3E%3Ccircle cx='8' cy='8' r='3'/%3E%3C/svg%3E") -} - -[type=checkbox]:checked:focus,[type=checkbox]:checked:hover,[type=radio]:checked:focus,[type=radio]:checked:hover { - background-color: currentColor; - border-color: transparent -} - -[type=checkbox]:indeterminate { - background-color: currentColor; - background-image: url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 16'%3E%3Cpath stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M4 8h8'/%3E%3C/svg%3E"); - background-position: 50%; - background-repeat: no-repeat; - background-size: 100% 100%; - border-color: transparent -} - -[type=checkbox]:indeterminate:focus,[type=checkbox]:indeterminate:hover { - background-color: currentColor; - border-color: transparent -} - -[type=file] { - background: unset; - border-color: inherit; - border-radius: 0; - border-width: 0; - font-size: unset; - line-height: inherit; - padding: 0 -} - -[type=file]:focus { - outline: 1px solid ButtonText; - outline: 1px auto -webkit-focus-ring-color -} - -*,:after,:before { - --tw-border-spacing-x: 0; - --tw-border-spacing-y: 0; - --tw-translate-x: 0; - --tw-translate-y: 0; - --tw-rotate: 0; - --tw-skew-x: 0; - --tw-skew-y: 0; - --tw-scale-x: 1; - --tw-scale-y: 1; - --tw-pan-x: ; - --tw-pan-y: ; - --tw-pinch-zoom: ; - --tw-scroll-snap-strictness: proximity; - --tw-ordinal: ; - --tw-slashed-zero: ; - --tw-numeric-figure: ; - --tw-numeric-spacing: ; - --tw-numeric-fraction: ; - --tw-ring-inset: ; - --tw-ring-offset-width: 0px; - --tw-ring-offset-color: #fff; - --tw-ring-color: rgba(59,130,246,.5); - --tw-ring-offset-shadow: 0 0 #0000; - --tw-ring-shadow: 0 0 #0000; - --tw-shadow: 0 0 #0000; - --tw-shadow-colored: 0 0 #0000; - --tw-blur: ; - --tw-brightness: ; - --tw-contrast: ; - --tw-grayscale: ; - --tw-hue-rotate: ; - --tw-invert: ; - --tw-saturate: ; - --tw-sepia: ; - --tw-drop-shadow: ; - --tw-backdrop-blur: ; - --tw-backdrop-brightness: ; - --tw-backdrop-contrast: ; - --tw-backdrop-grayscale: ; - --tw-backdrop-hue-rotate: ; - --tw-backdrop-invert: ; - --tw-backdrop-opacity: ; - --tw-backdrop-saturate: ; - --tw-backdrop-sepia: -} - -::backdrop { - --tw-border-spacing-x: 0; - --tw-border-spacing-y: 0; - --tw-translate-x: 0; - --tw-translate-y: 0; - --tw-rotate: 0; - --tw-skew-x: 0; - --tw-skew-y: 0; - --tw-scale-x: 1; - --tw-scale-y: 1; - --tw-pan-x: ; - --tw-pan-y: ; - --tw-pinch-zoom: ; - --tw-scroll-snap-strictness: proximity; - --tw-ordinal: ; - --tw-slashed-zero: ; - --tw-numeric-figure: ; - --tw-numeric-spacing: ; - 
--tw-numeric-fraction: ; - --tw-ring-inset: ; - --tw-ring-offset-width: 0px; - --tw-ring-offset-color: #fff; - --tw-ring-color: rgba(59,130,246,.5); - --tw-ring-offset-shadow: 0 0 #0000; - --tw-ring-shadow: 0 0 #0000; - --tw-shadow: 0 0 #0000; - --tw-shadow-colored: 0 0 #0000; - --tw-blur: ; - --tw-brightness: ; - --tw-contrast: ; - --tw-grayscale: ; - --tw-hue-rotate: ; - --tw-invert: ; - --tw-saturate: ; - --tw-sepia: ; - --tw-drop-shadow: ; - --tw-backdrop-blur: ; - --tw-backdrop-brightness: ; - --tw-backdrop-contrast: ; - --tw-backdrop-grayscale: ; - --tw-backdrop-hue-rotate: ; - --tw-backdrop-invert: ; - --tw-backdrop-opacity: ; - --tw-backdrop-saturate: ; - --tw-backdrop-sepia: -} - -.container { - width: 100% -} - -@media (min-width: 640px) { - .container { - max-width:640px - } -} - -@media (min-width: 768px) { - .container { - max-width:768px - } -} - -@media (min-width: 1024px) { - .container { - max-width:1024px - } -} - -@media (min-width: 1280px) { - .container { - max-width:1280px - } -} - -@media (min-width: 1536px) { - .container { - max-width:1536px - } -} - -.fixed { - position: fixed -} - -.absolute { - position: absolute -} - -.relative { - position: relative -} - -.inset-0 { - left: 0; - right: 0 -} - -.inset-0,.inset-y-0 { - bottom: 0; - top: 0 -} - -.inset-x-0 { - left: 0; - right: 0 -} - -.left-0 { - left: 0 -} - -.top-4 { - top: 1rem -} - -.left-4 { - left: 1rem -} - -.right-3 { - right: .75rem -} - -.bottom-6 { - bottom: 1.5rem -} - -.right-0 { - right: 0 -} - -.z-0 { - z-index: 0 -} - -.z-30 { - z-index: 30 -} - -.z-20 { - z-index: 20 -} - -.z-10 { - z-index: 10 -} - -.z-40 { - z-index: 40 -} - -.z-50 { - z-index: 50 -} - -.order-2 { - order: 2 -} - -.my-6 { - margin-bottom: 1.5rem; - margin-top: 1.5rem -} - -.mx-1\.5 { - margin-left: .375rem; - margin-right: .375rem -} - -.mx-1 { - margin-left: .25rem; - margin-right: .25rem -} - -.mx-auto { - margin-left: auto; - margin-right: auto -} - -.mx-3 { - margin-left: .75rem; - margin-right: .75rem -} - -.mx-2 { - margin-left: .5rem; - margin-right: .5rem -} - -.-mx-1 { - margin-left: -.25rem; - margin-right: -.25rem -} - -.my-20 { - margin-bottom: 5rem; - margin-top: 5rem -} - -.my-4 { - margin-bottom: 1rem; - margin-top: 1rem -} - -.-my-2 { - margin-bottom: -.5rem; - margin-top: -.5rem -} - -.mx-4 { - margin-left: 1rem; - margin-right: 1rem -} - -.-mx-2 { - margin-left: -.5rem; - margin-right: -.5rem -} - -.mx-6 { - margin-left: 1.5rem; - margin-right: 1.5rem -} - -.mx-0\.5 { - margin-left: .125rem; - margin-right: .125rem -} - -.mx-0 { - margin-left: 0; - margin-right: 0 -} - -.mt-6 { - margin-top: 1.5rem -} - -.mt-2 { - margin-top: .5rem -} - -.mt-4 { - margin-top: 1rem -} - -.mb-5 { - margin-bottom: 1.25rem -} - -.mt-8 { - margin-top: 2rem -} - -.mt-3 { - margin-top: .75rem -} - -.mb-2 { - margin-bottom: .5rem -} - -.mt-1 { - margin-top: .25rem -} - -.-mb-8 { - margin-bottom: -2rem -} - -.-ml-px { - margin-left: -1px -} - -.mt-5 { - margin-top: 1.25rem -} - -.mt-10 { - margin-top: 2.5rem -} - -.-mr-1 { - margin-right: -.25rem -} - -.-mt-20 { - margin-top: -5rem -} - -.-mt-2 { - margin-top: -.5rem -} - -.block { - display: block -} - -.inline-block { - display: inline-block -} - -.flex { - display: flex -} - -.inline-flex { - display: inline-flex -} - -.table { - display: table -} - -.flow-root { - display: flow-root -} - -.grid { - display: grid -} - -.hidden { - display: none -} - -.h-96 { - height: 24rem -} - -.h-1 { - height: .25rem -} - -.h-6 { - height: 1.5rem -} - -.h-32 { - height: 8rem -} - -.h-48 { - 
height: 12rem -} - -.h-8 { - height: 2rem -} - -.h-4 { - height: 1rem -} - -.h-5 { - height: 1.25rem -} - -.h-2 { - height: .5rem -} - -.h-full { - height: 100% -} - -.h-10 { - height: 2.5rem -} - -.h-36 { - height: 9rem -} - -.h-24 { - height: 6rem -} - -.h-56 { - height: 14rem -} - -.h-12 { - height: 3rem -} - -.h-16 { - height: 4rem -} - -.h-9 { - height: 2.25rem -} - -.h-screen { - height: 100vh -} - -.min-h-screen { - min-height: 100vh -} - -.w-32 { - width: 8rem -} - -.w-44 { - width: 11rem -} - -.w-6 { - width: 1.5rem -} - -.w-full { - width: 100% -} - -.w-8\/12 { - width: 66.666667% -} - -.w-11\/12 { - width: 91.666667% -} - -.w-5\/6 { - width: 83.333333% -} - -.w-2\/3 { - width: 66.666667% -} - -.w-8 { - width: 2rem -} - -.w-4 { - width: 1rem -} - -.w-5 { - width: 1.25rem -} - -.w-3\/12 { - width: 25% -} - -.w-9\/12 { - width: 75% -} - -.w-0\.5 { - width: .125rem -} - -.w-0 { - width: 0 -} - -.w-10 { - width: 2.5rem -} - -.w-24 { - width: 6rem -} - -.w-auto { - width: auto -} - -.w-12 { - width: 3rem -} - -.w-16 { - width: 4rem -} - -.w-1 { - width: .25rem -} - -.w-9 { - width: 2.25rem -} - -.w-64 { - width: 16rem -} - -.w-56 { - width: 14rem -} - -.min-w-full { - min-width: 100% -} - -.min-w-0 { - min-width: 0 -} - -.min-w-\[26rem\] { - min-width: 26rem -} - -.max-w-xs { - max-width: 20rem -} - -.max-w-xl { - max-width: 36rem -} - -.max-w-sm { - max-width: 24rem -} - -.flex-1 { - flex: 1 1 0% -} - -.flex-shrink-0 { - flex-shrink: 0 -} - -.translate-y-4 { - --tw-translate-y: 1rem -} - -.translate-y-0,.translate-y-4 { - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) -} - -.translate-y-0 { - --tw-translate-y: 0px -} - -.translate-x-full { - --tw-translate-x: 100% -} - -.translate-x-0,.translate-x-full { - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) -} - -.translate-x-0 { - --tw-translate-x: 0px -} - -.translate-x-20 { - --tw-translate-x: 5rem -} - -.-translate-x-full,.translate-x-20 { - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) -} - -.-translate-x-full { - --tw-translate-x: -100% -} - -.-translate-y-5 { - --tw-translate-y: -1.25rem -} - -.-translate-y-5,.scale-95 { - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) -} - -.scale-95 { - --tw-scale-x: .95; - --tw-scale-y: .95 -} - -.scale-100 { - --tw-scale-x: 1; - --tw-scale-y: 1 -} - -.scale-100,.transform { - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) -} - -.cursor-pointer { - cursor: pointer -} - -.cursor-not-allowed { - cursor: not-allowed -} - -.appearance-none { - -webkit-appearance: none; - -moz-appearance: none; - appearance: none -} - -.grid-cols-2 { - grid-template-columns: repeat(2,minmax(0,1fr)) -} - -.grid-cols-1 { - grid-template-columns: repeat(1,minmax(0,1fr)) -} - -.grid-cols-5 { - grid-template-columns: repeat(5,minmax(0,1fr)) -} - -.flex-col { - flex-direction: column -} - -.place-items-start { - 
place-items: start -} - -.place-items-center { - place-items: center -} - -.items-end { - align-items: flex-end -} - -.items-center { - align-items: center -} - -.justify-end { - justify-content: flex-end -} - -.justify-center { - justify-content: center -} - -.justify-between { - justify-content: space-between -} - -.gap-12 { - gap: 3rem -} - -.gap-8 { - gap: 2rem -} - -.gap-6 { - gap: 1.5rem -} - -.gap-4 { - gap: 1rem -} - -.space-y-6>:not([hidden])~:not([hidden]) { - --tw-space-y-reverse: 0; - margin-bottom: calc(1.5rem*var(--tw-space-y-reverse)); - margin-top: calc(1.5rem*(1 - var(--tw-space-y-reverse))) -} - -.space-y-5>:not([hidden])~:not([hidden]) { - --tw-space-y-reverse: 0; - margin-bottom: calc(1.25rem*var(--tw-space-y-reverse)); - margin-top: calc(1.25rem*(1 - var(--tw-space-y-reverse))) -} - -.-space-x-1>:not([hidden])~:not([hidden]) { - --tw-space-x-reverse: 0; - margin-left: calc(-.25rem*(1 - var(--tw-space-x-reverse))); - margin-right: calc(-.25rem*var(--tw-space-x-reverse)) -} - -.space-y-4>:not([hidden])~:not([hidden]) { - --tw-space-y-reverse: 0; - margin-bottom: calc(1rem*var(--tw-space-y-reverse)); - margin-top: calc(1rem*(1 - var(--tw-space-y-reverse))) -} - -.space-y-3>:not([hidden])~:not([hidden]) { - --tw-space-y-reverse: 0; - margin-bottom: calc(.75rem*var(--tw-space-y-reverse)); - margin-top: calc(.75rem*(1 - var(--tw-space-y-reverse))) -} - -.space-y-8>:not([hidden])~:not([hidden]) { - --tw-space-y-reverse: 0; - margin-bottom: calc(2rem*var(--tw-space-y-reverse)); - margin-top: calc(2rem*(1 - var(--tw-space-y-reverse))) -} - -.divide-y>:not([hidden])~:not([hidden]) { - --tw-divide-y-reverse: 0; - border-bottom-width: calc(1px*var(--tw-divide-y-reverse)); - border-top-width: calc(1px*(1 - var(--tw-divide-y-reverse))) -} - -.divide-gray-200>:not([hidden])~:not([hidden]) { - --tw-divide-opacity: 1; - border-color: rgb(229 231 235/var(--tw-divide-opacity)) -} - -.divide-gray-100>:not([hidden])~:not([hidden]) { - --tw-divide-opacity: 1; - border-color: rgb(243 244 246/var(--tw-divide-opacity)) -} - -.overflow-hidden { - overflow: hidden -} - -.overflow-x-auto { - overflow-x: auto -} - -.overflow-y-auto { - overflow-y: auto -} - -.truncate { - overflow: hidden; - text-overflow: ellipsis -} - -.truncate,.whitespace-nowrap { - white-space: nowrap -} - -.rounded-md { - border-radius: .375rem -} - -.rounded-lg { - border-radius: .5rem -} - -.rounded-l-lg { - border-top-left-radius: 0.75rem; - border-bottom-left-radius: 0.75rem; -} - -.rounded-r-lg { - border-top-right-radius: 0.75rem; - border-bottom-right-radius: 0.75rem; -} - -.rounded-full { - border-radius: 9999px -} - -.border { - border-width: 1px -} - -.border-2 { - border-width: 2px -} - -.border-0 { - border-width: 0 -} - -.border-b { - border-bottom-width: 1px -} - -.border-r { - border-right-width: 1px -} - -.border-dashed { - border-style: dashed -} - -.border-gray-200 { - --tw-border-opacity: 1; - border-color: rgb(229 231 235/var(--tw-border-opacity)) -} - -.border-green-500 { - --tw-border-opacity: 1; - border-color: rgb(34 197 94/var(--tw-border-opacity)) -} - -.border-red-500 { - --tw-border-opacity: 1; - border-color: rgb(239 68 68/var(--tw-border-opacity)) -} - -.border-indigo-500 { - --tw-border-opacity: 1; - border-color: #ffa116; -} - -.border-gray-300 { - --tw-border-opacity: 1; - border-color: rgb(209 213 219/var(--tw-border-opacity)) -} - -.border-gray-100 { - --tw-border-opacity: 1; - border-color: rgb(243 244 246/var(--tw-border-opacity)) -} - -.bg-white { - --tw-bg-opacity: 1; - 
background-color: rgb(255 255 255/var(--tw-bg-opacity)) -} - -.bg-indigo-500 { - /* --tw-bg-opacity: 1; */ - background-color: #ffa116; -} - -.bg-gray-200 { - --tw-bg-opacity: 1; - background-color: rgb(229 231 235/var(--tw-bg-opacity)) -} - -.bg-gray-100 { - --tw-bg-opacity: 1; - background-color: rgb(243 244 246/var(--tw-bg-opacity)) -} - -.bg-gray-500 { - --tw-bg-opacity: 1; - background-color: rgb(107 114 128/var(--tw-bg-opacity)) -} - -.bg-indigo-200 { - --tw-bg-opacity: 1; - background-color: rgb(199 210 254/var(--tw-bg-opacity)) -} - -.bg-gray-300 { - --tw-bg-opacity: 1; - background-color: rgb(209 213 219/var(--tw-bg-opacity)) -} - -.bg-gray-50 { - --tw-bg-opacity: 1; - background-color: rgb(249 250 251/var(--tw-bg-opacity)) -} - -.bg-green-500 { - --tw-bg-opacity: 1; - background-color: rgb(34 197 94/var(--tw-bg-opacity)) -} - -.bg-yellow-500 { - --tw-bg-opacity: 1; - background-color: rgb(234 179 8/var(--tw-bg-opacity)) -} - -.bg-red-500 { - --tw-bg-opacity: 1; - background-color: rgb(239 68 68/var(--tw-bg-opacity)) -} - -.bg-purple-500 { - --tw-bg-opacity: 1; - background-color: rgb(168 85 247/var(--tw-bg-opacity)) -} - -.bg-sky-500 { - --tw-bg-opacity: 1; - background-color: rgb(14 165 233/var(--tw-bg-opacity)) -} - -.bg-black { - --tw-bg-opacity: 1; - background-color: rgb(0 0 0/var(--tw-bg-opacity)) -} - -.bg-opacity-40 { - --tw-bg-opacity: 0.4 -} - -.bg-gradient-to-r { - background-image: linear-gradient(to right,var(--tw-gradient-stops)) -} - -.from-indigo-800 { - --tw-gradient-from: #3730a3; - --tw-gradient-to: rgba(55,48,163,0); - --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to) -} - -.from-indigo-900 { - --tw-gradient-from: #f8b34c; - --tw-gradient-to: rgba(49,46,129,0); - --tw-gradient-stops: var(--tw-gradient-from),var(--tw-gradient-to) -} - -.via-indigo-900 { - --tw-gradient-to: rgba(49,46,129,0); - --tw-gradient-stops: var(--tw-gradient-from),#312e81,var(--tw-gradient-to) -} - -.to-gray-900 { - --tw-gradient-to: #111827 -} - -.bg-clip-text { - -webkit-background-clip: text; - background-clip: text -} - -.object-cover { - -o-object-fit: cover; - object-fit: cover -} - -.p-4 { - padding: 1rem -} - -.p-2 { - padding: .5rem -} - -.p-5 { - padding: 1.25rem -} - -.p-8 { - padding: 2rem -} - -.p-3 { - padding: .75rem -} - -.p-1\.5 { - padding: .375rem -} - -.p-1 { - padding: .25rem -} - -.px-4 { - padding-left: 1rem; - padding-right: 1rem -} - -.py-2 { - padding-bottom: .5rem; - padding-top: .5rem -} - -.py-2\.5 { - padding-bottom: .625rem; - padding-top: .625rem -} - -.px-2 { - padding-left: .5rem; - padding-right: .5rem -} - -.px-6 { - padding-left: 1.5rem; - padding-right: 1.5rem -} - -.py-3 { - padding-bottom: .75rem; - padding-top: .75rem -} - -.py-4 { - padding-bottom: 1rem; - padding-top: 1rem -} - -.px-3 { - padding-left: .75rem; - padding-right: .75rem -} - -.py-1 { - padding-bottom: .25rem; - padding-top: .25rem -} - -.px-1 { - padding-left: .25rem; - padding-right: .25rem -} - -.px-0\.5 { - padding-left: .125rem; - padding-right: .125rem -} - -.px-0 { - padding-left: 0; - padding-right: 0 -} - -.px-5 { - padding-left: 1.25rem; - padding-right: 1.25rem -} - -.px-8 { - padding-left: 2rem; - padding-right: 2rem -} - -.py-6 { - padding-bottom: 1.5rem; - padding-top: 1.5rem -} - -.py-8 { - padding-bottom: 2rem; - padding-top: 2rem -} - -.pl-3 { - padding-left: .75rem -} - -.pl-5 { - padding-left: 1.25rem -} - -.pl-10 { - padding-left: 2.5rem -} - -.pr-4 { - padding-right: 1rem -} - -.pb-8 { - padding-bottom: 2rem -} - -.pt-1\.5 { - padding-top: 
.375rem -} - -.pt-1 { - padding-top: .25rem -} - -.pr-24 { - padding-right: 6rem -} - -.pr-16 { - padding-right: 4rem -} - -.pb-6 { - padding-bottom: 1.5rem -} - -.pt-4 { - padding-top: 1rem -} - -.pl-14 { - padding-left: 3.5rem -} - -.text-left { - text-align: left -} - -.text-center { - text-align: center -} - -.align-middle { - vertical-align: middle -} - -.font-roboto { - font-family: Roboto,sans-serif -} - -.text-7xl { - font-size: 4.5rem; - line-height: 1 -} - -.text-xl { - font-size: 1.25rem -} - -.text-lg,.text-xl { - line-height: 1.75rem -} - -.text-lg { - font-size: 1.125rem -} - -.text-sm { - font-size: .875rem; - line-height: 1.25rem -} - -.text-2xl { - font-size: 1.5rem; - line-height: 2rem -} - -.text-xs { - font-size: .75rem; - line-height: 1rem -} - -.text-3xl { - font-size: 1.875rem; - line-height: 2.25rem -} - -.font-bold { - font-weight: 700 -} - -.font-semibold { - font-weight: 600 -} - -.font-medium { - font-weight: 500 -} - -.uppercase { - text-transform: uppercase -} - -.capitalize { - text-transform: capitalize -} - -.tracking-wide { - letter-spacing: .025em -} - -.tracking-wider { - letter-spacing: .05em -} - -.text-transparent { - color: transparent -} - -.text-gray-800 { - --tw-text-opacity: 1; - color: rgb(31 41 55/var(--tw-text-opacity)) -} - -.text-gray-600 { - --tw-text-opacity: 1; - color: rgb(75 85 99/var(--tw-text-opacity)) -} - -.text-gray-700 { - --tw-text-opacity: 1; - color: rgb(55 65 81/var(--tw-text-opacity)) -} - -.text-gray-500 { - --tw-text-opacity: 1; - color: rgb(107 114 128/var(--tw-text-opacity)) -} - -.text-green-500 { - --tw-text-opacity: 1; - color: rgb(34 197 94/var(--tw-text-opacity)) -} - -.text-white { - --tw-text-opacity: 1; - color: rgb(255 255 255/var(--tw-text-opacity)) -} - -.text-gray-400 { - --tw-text-opacity: 1; - color: rgb(156 163 175/var(--tw-text-opacity)) -} - -.text-red-500 { - --tw-text-opacity: 1; - color: rgb(239 68 68/var(--tw-text-opacity)) -} - -.text-indigo-500 { - --tw-text-opacity: 1; - color: #ffa116; -} - -.text-blue-500 { - --tw-text-opacity: 1; - color: rgb(59 130 246/var(--tw-text-opacity)) -} - -.text-cyan-500 { - --tw-text-opacity: 1; - color: rgb(6 182 212/var(--tw-text-opacity)) -} - -.text-indigo-700 { - --tw-text-opacity: 1; - color: #ffa116; -} - -.text-red-600 { - --tw-text-opacity: 1; - color: rgb(220 38 38/var(--tw-text-opacity)) -} - -.text-indigo-600 { - --tw-text-opacity: 1; - color: #ffa116; -} - -.placeholder-gray-400::-moz-placeholder { - --tw-placeholder-opacity: 1; - color: rgb(156 163 175/var(--tw-placeholder-opacity)) -} - -.placeholder-gray-400::placeholder { - --tw-placeholder-opacity: 1; - color: rgb(156 163 175/var(--tw-placeholder-opacity)) -} - -.placeholder-gray-300::-moz-placeholder { - --tw-placeholder-opacity: 1; - color: rgb(209 213 219/var(--tw-placeholder-opacity)) -} - -.placeholder-gray-300::placeholder { - --tw-placeholder-opacity: 1; - color: rgb(209 213 219/var(--tw-placeholder-opacity)) -} - -.opacity-0 { - opacity: 0 -} - -.opacity-100 { - opacity: 1 -} - -.opacity-50 { - opacity: .5 -} - -.shadow-sm { - --tw-shadow: 0 1px 2px 0 rgba(0,0,0,.05); - --tw-shadow-colored: 0 1px 2px 0 var(--tw-shadow-color) -} - -.shadow-sm,.shadow-xl { - box-shadow: var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow) -} - -.shadow-xl { - --tw-shadow: 0 20px 25px -5px rgba(0,0,0,.1),0 8px 10px -6px rgba(0,0,0,.1); - --tw-shadow-colored: 0 20px 25px -5px var(--tw-shadow-color),0 8px 10px -6px var(--tw-shadow-color) -} - -.shadow-lg { - --tw-shadow: 0 10px 
15px -3px rgba(0,0,0,.1),0 4px 6px -4px rgba(0,0,0,.1); - --tw-shadow-colored: 0 10px 15px -3px var(--tw-shadow-color),0 4px 6px -4px var(--tw-shadow-color); - box-shadow: var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow) -} - -.ring-2 { - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color) -} - -.ring-2,.ring-8 { - box-shadow: var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000) -} - -.ring-8 { - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(8px + var(--tw-ring-offset-width)) var(--tw-ring-color) -} - -.ring-4 { - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(4px + var(--tw-ring-offset-width)) var(--tw-ring-color) -} - -.ring-1,.ring-4 { - box-shadow: var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000) -} - -.ring-1 { - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color) -} - -.ring-white { - --tw-ring-opacity: 1; - --tw-ring-color: rgb(255 255 255/var(--tw-ring-opacity)) -} - -.ring-indigo-500 { - --tw-ring-opacity: 1; - --tw-ring-color: #ffa116; -} - -.transition-colors { - transition-duration: .15s; - transition-property: color,background-color,border-color,fill,stroke,-webkit-text-decoration-color; - transition-property: color,background-color,border-color,text-decoration-color,fill,stroke; - transition-property: color,background-color,border-color,text-decoration-color,fill,stroke,-webkit-text-decoration-color; - transition-timing-function: cubic-bezier(.4,0,.2,1) -} - -.transition { - transition-duration: .15s; - transition-property: color,background-color,border-color,fill,stroke,opacity,box-shadow,transform,filter,-webkit-text-decoration-color,-webkit-backdrop-filter; - transition-property: color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter; - transition-property: color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-text-decoration-color,-webkit-backdrop-filter; - transition-timing-function: cubic-bezier(.4,0,.2,1) -} - -.transition-opacity { - transition-duration: .15s; - transition-property: opacity; - transition-timing-function: cubic-bezier(.4,0,.2,1) -} - -.transition-all { - transition-duration: .15s; - transition-property: all; - transition-timing-function: cubic-bezier(.4,0,.2,1) -} - -.duration-200 { - transition-duration: .2s -} - -.duration-300 { - transition-duration: .3s -} - -.duration-100 { - transition-duration: .1s -} - -.duration-75 { - transition-duration: 75ms -} - -.duration-150 { - transition-duration: .15s -} - -.ease-out { - transition-timing-function: cubic-bezier(0,0,.2,1) -} - -.ease-in { - transition-timing-function: cubic-bezier(.4,0,1,1) -} - -.ease-linear { - transition-timing-function: linear -} - -[x-cloak] { - display: none!important -} - -@media only screen and (min-width: 1024px) { - ::-webkit-scrollbar { - height:10px; - width: 8px - } - - 
::-webkit-scrollbar-track { - background: transparent - } - - ::-webkit-scrollbar-thumb { - background: rgba(0,0,0,.2); - border: 2px solid #fff; - border-radius: 50px - } - - ::-webkit-scrollbar-thumb:hover { - background: #374151 - } -} - -.pattern { - background-image: url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' width='600' height='600'%3E%3Cpath fill='%23a0aec0' fill-opacity='.1' d='M600 325.1v-1.17c-6.5 3.83-13.06 7.64-14.68 8.64-10.6 6.56-18.57 12.56-24.68 19.09-5.58 5.95-12.44 10.06-22.42 14.15-1.45.6-2.96 1.2-4.83 1.9l-4.75 1.82c-9.78 3.75-14.8 6.27-18.98 10.1-4.23 3.88-9.65 6.6-16.77 8.84-1.95.6-3.99 1.17-6.47 1.8l-6.14 1.53c-5.29 1.35-8.3 2.37-10.54 3.78-3.08 1.92-6.63 3.26-12.74 5.03a384.1 384.1 0 0 1-4.82 1.36c-2.04.58-3.6 1.04-5.17 1.52a110.03 110.03 0 0 0-11.2 4.05c-2.7 1.15-5.5 3.93-8.78 8.4a157.68 157.68 0 0 0-6.15 9.2c-5.75 9.07-7.58 11.74-10.24 14.51a50.97 50.97 0 0 1-4.6 4.22c-2.33 1.9-10.39 7.54-11.81 8.74a14.68 14.68 0 0 0-3.67 4.15c-1.24 2.3-1.9 4.57-2.78 8.87-2.17 10.61-3.52 14.81-8.2 22.1-4.07 6.33-6.8 9.88-9.83 12.99-.47.48-.95.96-1.5 1.48l-3.75 3.56c-1.67 1.6-3.18 3.12-4.86 4.9a42.44 42.44 0 0 0-9.89 16.94c-2.5 8.13-2.72 15.47-1.76 27.22.47 5.82.51 6.36.51 8.18 0 10.51.12 17.53.63 25.78.24 4.05.56 7.8.97 11.22h.9c-1.13-9.58-1.5-21.83-1.5-37 0-1.86-.04-2.4-.52-8.26-.94-11.63-.72-18.87 1.73-26.85a41.44 41.44 0 0 1 9.65-16.55c1.67-1.76 3.18-3.27 4.83-4.85.63-.6 3.13-2.96 3.75-3.57a71.6 71.6 0 0 0 1.52-1.5c3.09-3.16 5.86-6.76 9.96-13.15 4.77-7.42 6.15-11.71 8.34-22.44.86-4.21 1.5-6.4 2.68-8.6.68-1.25 1.79-2.48 3.43-3.86 1.38-1.15 9.43-6.8 11.8-8.72 1.71-1.4 3.26-2.81 4.7-4.3 2.72-2.85 4.56-5.54 10.36-14.67a156.9 156.9 0 0 1 6.1-9.15c3.2-4.33 5.9-7.01 8.37-8.07 3.5-1.5 7.06-2.77 11.1-4.02a233.84 233.84 0 0 1 7.6-2.2l2.38-.67c6.19-1.79 9.81-3.16 12.98-5.15 2.14-1.33 5.08-2.33 10.27-3.65l6.14-1.53c2.5-.63 4.55-1.2 6.52-1.82 7.24-2.27 12.79-5.06 17.15-9.05 4.05-3.72 9-6.2 18.66-9.9l4.75-1.82c1.87-.72 3.39-1.31 4.85-1.91 10.1-4.15 17.07-8.32 22.76-14.4 6.05-6.45 13.95-12.4 24.49-18.92 1.56-.96 7.82-4.6 14.15-8.33v-64.58c-4 8.15-8.52 14.85-12.7 17.9-2.51 1.82-5.38 4.02-9.04 6.92a1063.87 1063.87 0 0 0-6.23 4.98l-1.27 1.02a2309.25 2309.25 0 0 1-4.87 3.9c-7.55 6-12.9 10.05-17.61 13.19-3.1 2.06-3.86 2.78-8.06 7.13-5.84 6.07-11.72 8.62-29.15 10.95-11.3 1.5-20.04 4.91-30.75 11.07-1.65.94-7.27 4.27-6.97 4.1-2.7 1.58-4.69 2.69-6.64 3.66-5.63 2.8-10.47 4.17-15.71 4.17-17.13 0-41.44 11.51-51.63 22.83-12.05 13.4-31.42 27.7-45.25 31.16-7.4 1.85-11.85 7.05-14.04 14.69-1.26 4.4-1.58 8.28-1.58 13.82 0 .82.01.98.24 3.63.45 5.18.35 8.72-.77 13.26-1.53 6.2-4.89 12.6-10.59 19.43-13.87 16.65-22.88 46.58-22.88 71.68 0 2.39.02 4.26.06 8.75.12 10.8.1 15.8-.22 21.95-.56 11.18-2.09 20.73-5 29.3h-1.05c2.94-8.56 4.49-18.12 5.05-29.35.31-6.13.34-11.1.22-21.9-.04-4.48-.06-6.36-.06-8.75 0-25.32 9.07-55.47 23.12-72.32 5.6-6.72 8.88-12.99 10.38-19.03 1.09-4.4 1.18-7.85.74-12.93-.23-2.7-.24-2.86-.24-3.72 0-5.62.32-9.57 1.62-14.1 2.28-7.95 6.97-13.44 14.76-15.39 13.6-3.4 32.82-17.59 44.75-30.84C409 360.14 433.58 348.5 451 348.5c5.07 0 9.77-1.33 15.26-4.07 1.93-.96 3.9-2.05 6.58-3.62-.3.18 5.33-3.16 6.98-4.11 10.82-6.21 19.66-9.67 31.11-11.2 17.23-2.3 22.9-4.75 28.57-10.64 4.25-4.41 5.04-5.16 8.22-7.28 4.68-3.11 10.01-7.14 17.55-13.14a1113.33 1113.33 0 0 0 4.86-3.89l1.28-1.02a4668.54 4668.54 0 0 1 6.23-4.98c3.67-2.9 6.55-5.12 9.07-6.95 4.37-3.19 9.16-10.56 13.29-19.4v66.9zm0-116.23c-.62.01-1.27.06-1.95.13-6.13.63-13.83 3.45-21.83 7.45-3.64 1.82-8.46 2.67-14.17 
2.71-4.7.04-9.72-.47-14.73-1.33-1.7-.3-3.26-.61-4.67-.93a31.55 31.55 0 0 0-3.55-.57 273.4 273.4 0 0 0-16.66-.88c-10.42-.16-17.2.74-17.97 2.73-.38.97.6 2.55 3.03 4.87 1.01.97 2.22 2.03 4.04 3.55a1746.07 1746.07 0 0 0 4.79 4.02c1.39 1.2 3.1 1.92 5.5 2.5.7.16.86.2 2.64.54 3.53.7 5.03 1.25 6.15 2.63 1.41 1.76 1.4 4.54-.15 8.88-2.44 6.83-5.72 10.05-10.19 10.33-3.63.23-7.6-1.29-14.52-5.06-4.53-2.47-6.82-7.3-8.32-15.26-.17-.87-.32-1.78-.5-2.86l-.43-2.76c-1.05-6.58-1.9-9.2-3.73-10.11-.81-.4-1.59-.74-2.36-1-2.27-.77-4.6-1.02-8.1-.92-2.29.07-14.7 1-13.77.93-20.55 1.37-28.8 5.05-37.09 14.99a133.07 133.07 0 0 0-4.25 5.44l-2.3 3.09-2.51 3.32c-4.1 5.36-7.06 8.48-10.39 11.12-.65.52-1.33 1.04-2.13 1.62l-4.11 2.94a106.8 106.8 0 0 0-5.16 3.99c-4.55 3.74-9.74 8.6-16.25 15.38-8.25 8.58-11.78 13.54-11.7 15.95.07 1.65 1.64 2.11 6.79 2.38 1.61.09 2.15.12 2.98.2 2.95.24 5.09.73 6.81 1.68 7.48 4.15 11.63 7.26 13.95 11.58 3.3 6.15.8 12.88-8.89 20.26-8.28 6.3-11.1 10.37-11.31 14.96-.06 1.17 0 1.93.26 4.43.69 6.47.25 10.65-2.8 17.42a44.23 44.23 0 0 1-4.16 7.53c-2.82 3.97-5.47 5.74-10.6 7.69-.43.16-3.34 1.23-4.27 1.59-1.8.68-3.38 1.36-5.01 2.14-4.18 2-8.4 4.6-13.1 8.24-8.44 6.51-13.23 14.56-15.98 25.06-1.1 4.2-1.55 6.81-2.8 15.21-1.26 8.6-2.17 12.64-4.08 16.55-2.1 4.28-11.93 26.59-12.97 28.88a382.7 382.7 0 0 1-6.37 13.41c-4.07 8.11-7.61 14.07-10.73 17.81-5.38 6.46-8.98 14.37-13.77 28.42a810.14 810.14 0 0 0-1.89 5.6c-1.8 5.35-2.96 8.6-4.26 11.85-6.13 15.32-25.43 26.31-46.46 26.31-11.2 0-20.58-2.74-31.02-8.55-5.6-3.13-4.55-2.42-22.26-14.54-14.33-9.8-17.7-10.73-20.47-6.9-.37.5-1.81 2.74-1.83 2.77a52.24 52.24 0 0 1-4.94 5.9c-.73.79-5.52 5.87-6.97 7.45-2.38 2.6-4.3 4.81-5.98 6.93a45.6 45.6 0 0 0-5.08 7.66c-1.29 2.57-1.9 5.25-2.66 10.6a997.6 997.6 0 0 1-.46 3.18h-1l.47-3.32c.77-5.45 1.4-8.2 2.75-10.9a46.54 46.54 0 0 1 5.2-7.84c1.7-2.14 3.63-4.38 6.03-6.98 1.45-1.59 6.24-6.68 6.96-7.46a51.58 51.58 0 0 0 4.84-5.78s1.47-2.26 1.86-2.8c3.25-4.5 7.08-3.44 21.84 6.67 17.67 12.08 16.62 11.38 22.19 14.48 10.3 5.73 19.5 8.43 30.53 8.43 20.65 0 39.57-10.77 45.54-25.69a219.7 219.7 0 0 0 4.24-11.8 6752.32 6752.32 0 0 0 1.88-5.6c4.83-14.16 8.47-22.14 13.96-28.73 3.05-3.66 6.56-9.57 10.6-17.61 1.97-3.93 4.04-8.31 6.35-13.38 1.03-2.28 10.88-24.61 12.98-28.91 1.85-3.79 2.75-7.76 4-16.25 1.24-8.44 1.7-11.07 2.81-15.32 2.8-10.7 7.71-18.94 16.33-25.6a73.18 73.18 0 0 1 13.29-8.35c1.66-.8 3.27-1.48 5.08-2.18.94-.36 3.86-1.43 4.28-1.59 4.95-1.88 7.44-3.55 10.14-7.33 1.35-1.9 2.68-4.3 4.06-7.37 2.97-6.58 3.39-10.59 2.72-16.9a27.13 27.13 0 0 1-.27-4.58c.22-4.94 3.21-9.24 11.7-15.7 9.33-7.11 11.66-13.34 8.62-19-2.2-4.09-6.25-7.12-13.55-11.17-1.57-.88-3.6-1.33-6.42-1.57-.8-.07-1.34-.1-2.95-.19-5.77-.3-7.63-.85-7.72-3.34-.1-2.81 3.5-7.87 11.97-16.69 6.53-6.8 11.75-11.69 16.33-15.45 1.79-1.47 3.42-2.72 5.2-4.03l4.12-2.94c.79-.58 1.46-1.08 2.1-1.59 3.26-2.6 6.16-5.65 10.21-10.94a383.2 383.2 0 0 0 2.5-3.32l2.31-3.09c1.8-2.39 3.04-4 4.29-5.48 8.47-10.17 16.98-13.96 37.27-15.3-.44.02 12-.9 14.32-.98 3.62-.1 6.05.16 8.46.98.8.27 1.62.62 2.47 1.04 2.27 1.14 3.17 3.87 4.27 10.85l.44 2.76c.17 1.07.33 1.97.5 2.83 1.44 7.69 3.62 12.29 7.8 14.57 6.76 3.68 10.6 5.15 13.99 4.94 4-.25 6.99-3.17 9.3-9.67 1.45-4.04 1.46-6.49.32-7.92-.9-1.12-2.28-1.62-5.57-2.27a55.8 55.8 0 0 1-2.67-.55c-2.54-.6-4.39-1.4-5.93-2.71a252.63 252.63 0 0 0-4.78-4.01 84.35 84.35 0 0 1-4.08-3.6c-2.73-2.6-3.86-4.43-3.28-5.95 1.02-2.64 7.82-3.54 18.93-3.37a230.56 230.56 0 0 1 16.73.88c2.76.39 3.2.49 3.68.6 1.4.3 2.95.62 4.62.91a82.9 82.9 0 0 0 14.56 1.32c5.56-.04 10.24-.86 13.73-2.6 8.1-4.05 
15.89-6.9 22.17-7.56.7-.07 1.4-.11 2.05-.13v1zm0-100.94v1.5c-8.62 16.05-17.27 29.55-23.65 35.92-3.19 3.2-7.62 4.9-13.54 5.56-4.45.48-8.28.4-19.18-.2-9.91-.55-15.32-.44-20.52.78a84.05 84.05 0 0 1-15 2.11l-2.25.14c-12.49.75-19.37 1.78-32.72 5.74-4.5 1.33-9.27 2.49-14.3 3.48a246.27 246.27 0 0 1-32.6 3.97c-7.56.45-13.21.57-20.24.57-5.4 0-11.9 1.61-18 5.18-8.3 4.87-15.06 12.87-19.53 24.5a68.57 68.57 0 0 1-4.56 9.8c-3.6 6.2-6.92 8.99-13.38 12.18l-4.03 1.96a64.48 64.48 0 0 0-15.16 10.25c-8.2 7.33-13.72 16.63-22.54 35.6l-2.08 4.49c-7.3 15.7-11.5 23.3-17.35 29.87-7.7 8.66-20.25 14.42-40.31 20.08-4.37 1.23-19.04 5.08-19.24 5.13-6.92 1.87-11.68 3.34-15.63 4.92-10.55 4.22-18.71 10.52-36.38 26.52l-1.7 1.54c-8.58 7.76-13.41 11.9-18.81 15.88-3.95 2.9-8 5.67-12.97 8.91-2.06 1.34-10.3 6.6-12.33 7.94-11.52 7.5-18.53 13.04-24.62 20.08a62.01 62.01 0 0 0-6.44 8.85c-4.13 6.91-6.27 13.15-9.2 25.11l-1.54 6.26c-.6 2.45-1.15 4.54-1.72 6.58-2.97 10.7-6.9 17.36-14.78 26.91L69.6 491a148.51 148.51 0 0 0-4.19 5.3 23.9 23.9 0 0 0-3.44 6.28c-1.16 3.23-1.52 5.9-1.87 11.94-.58 10.05-1.42 15.04-4.63 22.67-1.57 3.72-5.66 14.02-6.41 15.8a73.46 73.46 0 0 1-3.57 7.4c-2.88 5.14-6.71 10.12-13.12 16.95-5.96 6.36-8.87 10.9-10.61 16a56.88 56.88 0 0 0-1.38 4.82l-.46 1.84h-1.03l.52-2.08c.52-2.09.92-3.49 1.4-4.9 1.8-5.25 4.78-9.9 10.84-16.36 6.35-6.78 10.13-11.7 12.97-16.77a72.5 72.5 0 0 0 3.52-7.29c.75-1.76 4.84-12.06 6.4-15.8 3.17-7.5 3.99-12.4 4.56-22.33.35-6.14.72-8.88 1.93-12.23a24.9 24.9 0 0 1 3.58-6.54c1.27-1.7 2.6-3.37 4.22-5.34l4.11-4.95c7.8-9.46 11.66-16 14.59-26.54.56-2.04 1.1-4.12 1.71-6.56l1.53-6.26c2.96-12.04 5.13-18.36 9.32-25.39 1.84-3.08 4-6.05 6.54-8.99 6.17-7.12 13.24-12.7 24.83-20.26 2.05-1.33 10.28-6.6 12.33-7.94 4.96-3.22 9-5.98 12.92-8.87 5.37-3.95 10.19-8.08 18.74-15.82l1.7-1.54c17.76-16.09 25.98-22.43 36.67-26.7 4-1.6 8.8-3.09 15.75-4.96.21-.06 14.87-3.9 19.22-5.13 19.9-5.61 32.32-11.31 39.85-19.78 5.76-6.48 9.93-14.02 17.18-29.64l2.09-4.5c8.87-19.07 14.44-28.46 22.77-35.9a65.48 65.48 0 0 1 15.38-10.4l4.04-1.97c6.3-3.1 9.47-5.77 12.96-11.77a67.6 67.6 0 0 0 4.48-9.67c4.56-11.84 11.47-20.02 19.97-25 6.25-3.66 12.93-5.32 18.5-5.32 7.01 0 12.65-.12 20.17-.57a245.3 245.3 0 0 0 32.47-3.96c5-.98 9.75-2.13 14.22-3.45 13.43-3.98 20.38-5.02 32.94-5.78l2.24-.14c5.76-.37 9.8-.9 14.85-2.09 5.31-1.25 10.79-1.35 22.6-.7 9.04.5 12.84.58 17.21.1 5.71-.62 9.94-2.26 12.95-5.26 6.44-6.45 15.3-20.37 24.35-36.72zm0 450.21c-1.28-4.6-2.2-10.55-3.33-20.25l-.24-2.04-.23-2.03c-1.82-15.7-3.07-21.98-5.55-24.47-2.46-2.46-3.04-5.03-2.52-8.64.1-.6.18-1.1.39-2.15.69-3.54.77-5.04.08-6.84-.91-2.38-3.31-4.41-7.79-6.26-5.08-2.09-6.52-4.84-4.89-8.44.66-1.45 1.79-3.02 3.52-5.01 1.04-1.2 5.48-5.96 5.08-5.53 6.15-6.7 8.98-11.34 8.98-16.48a15.2 15.2 0 0 1 6.5-12.89v1.26a14.17 14.17 0 0 0-5.5 11.63c0 5.47-2.93 10.29-9.24 17.16.38-.42-4.04 4.33-5.07 5.5-1.67 1.93-2.75 3.43-3.36 4.77-1.37 3.04-.23 5.22 4.36 7.1 4.71 1.95 7.32 4.16 8.34 6.83.78 2.04.7 3.67-.03 7.4-.2 1.03-.3 1.51-.38 2.09-.48 3.33.03 5.59 2.23 7.8 2.74 2.74 3.98 8.96 5.84 25.06l.24 2.03.23 2.04c.82 7.01 1.53 12.06 2.34 16.03v4.33zm0-62.16c-1.4-3.13-4.43-9.9-4.95-11.17-1.02-2.53-1.25-3.8-.91-5.18.2-.84 2.05-4.68 2.32-5.33a70.79 70.79 0 0 0 3.54-11.2v3.99a62.82 62.82 0 0 1-2.62 7.6c-.31.75-2.09 4.46-2.27 5.18-.28 1.12-.08 2.22.87 4.57.41 1.02 2.5 5.7 4.02 9.09v2.45zm0-85.09c-1.65 1.66-3.66 2.9-6.4 4.13-.25.1-13.97 5.47-20.4 8.43-9.35 4.32-16.7 5.9-23.03 5.25-5.08-.53-9.02-2.25-14.77-5.92l-3.2-2.07a77.4 77.4 0 0 0-5.44-3.27c-4.05-2.18-3.25-5.8 1.47-10.47 3.71-3.68 9.6-7.93 
18.73-13.8l4.46-2.82c17.95-11.33 18.22-11.5 22.27-14.74 11.25-9 19.69-14.02 26.31-15.1v1.02c-6.37 1.1-14.62 6-25.69 14.86-4.1 3.28-4.34 3.44-22.36 14.8a652.4 652.4 0 0 0-4.45 2.83c-9.07 5.83-14.92 10.05-18.57 13.66-4.31 4.28-4.95 7.13-1.7 8.88 1.7.91 3.29 1.88 5.5 3.3l3.2 2.08c5.64 3.59 9.45 5.25 14.34 5.76 6.13.64 13.32-.9 22.52-5.15 6.46-2.98 20.18-8.35 20.4-8.44 3.04-1.37 5.1-2.71 6.81-4.69v1.47zm0-41.37v1c-6.56.26-12.11 3.13-19.71 9.08l-4.63 3.68a51.87 51.87 0 0 1-4.4 3.14c-.82.52-5.51 3.33-6.22 3.76-3.31 2-6.15 3.8-8.87 5.6a112.61 112.61 0 0 0-8.16 5.92c-4.61 3.72-7.4 6.9-7.97 9.35-.63 2.67 1.48 4.53 7.05 5.46 10.7 1.78 20.92-.05 30.45-4.65a61.96 61.96 0 0 0 17.1-12.2 41.8 41.8 0 0 0 5.36-7.42v1.92a38.94 38.94 0 0 1-4.64 6.19 62.95 62.95 0 0 1-17.39 12.41c-9.7 4.68-20.13 6.55-31.05 4.73-6.06-1-8.65-3.29-7.85-6.67.64-2.74 3.53-6.05 8.31-9.9 2.35-1.9 5.1-3.88 8.24-5.97 2.73-1.82 5.58-3.61 8.9-5.62.72-.44 5.4-3.24 6.22-3.75 1.26-.8 2.6-1.76 4.3-3.09.8-.62 3.9-3.1 4.63-3.67 7.77-6.1 13.49-9.04 20.33-9.3zm0-154.6v1c-1.75-.24-4.3.23-7.82 1.55-10.01 3.75-13.8 5.07-19.15 6.76-1.78.56-2.63.83-3.87 1.24-1.48.5-3.16.76-6.74 1.16a1550.34 1550.34 0 0 0-2.64.3c-7.8.94-11.28 2.47-11.28 6.07 0 4.45 2.89 13.18 7.96 25.81a57.34 57.34 0 0 1 2.33 7.6 258.32 258.32 0 0 1 .84 3.46c1.86 7.62 3.17 10.71 5.56 11.67 2.21.88 4.7.6 7.47-.72 3.48-1.69 7.22-4.94 11.2-9.47 1.52-1.7 2.97-3.49 4.59-5.57l3.16-4.1c2.59-3.23 6.07-12.21 8.39-20.23v3.45c-2.29 7.2-5.27 14.5-7.61 17.41-.44.55-2.67 3.46-3.15 4.09-1.63 2.1-3.1 3.9-4.62 5.62-4.08 4.61-7.9 7.94-11.53 9.7-2.99 1.44-5.77 1.75-8.28.74-2.84-1.13-4.2-4.34-6.15-12.35a2097.48 2097.48 0 0 1-.84-3.46c-.8-3.2-1.47-5.45-2.28-7.46-5.14-12.8-8.04-21.55-8.04-26.19 0-4.37 3.84-6.06 12.16-7.07a160.9 160.9 0 0 1 2.65-.3c3.5-.39 5.15-.64 6.53-1.1 1.26-.42 2.1-.7 3.88-1.26 5.34-1.68 9.11-3 19.1-6.74 3.53-1.32 6.22-1.84 8.18-1.61zM0 292c10.13-11.31 18.13-23.2 23.07-35.39 3.3-8.14 6.09-16.12 10.81-30.55l1.59-4.84c6.53-19.94 10.11-29.82 14.77-39.56 6.07-12.72 12.55-21.18 20.27-25.54 6.66-3.76 10.2-7.86 12.22-13.15a46.6 46.6 0 0 0 1.86-6.58c1.23-5.2 2.05-7.59 3.93-10.36 2.45-3.62 6.27-6.53 12.1-8.96 15.78-6.58 16.73-7.04 18.05-9.01.65-.98.83-2.15.74-4.51-.03-.73-.23-3.82-.24-4A93.8 93.8 0 0 1 119 94c0-10.04.18-11.37 2.37-13.15.52-.42 1.13-.8 2.07-1.3.27-.14 2.18-1.12 2.84-1.48a68.4 68.4 0 0 0 9.12-5.87c2.06-1.54 2.64-2.14 8.01-7.93 3.78-4.09 6.21-6.36 8.96-8.12 3.64-2.33 7.2-3.12 10.9-2.11 4.4 1.2 10.81 2 18.78 2.46 6.9.4 12.9.5 21.95.5 4.87 0 8.97.47 15.4 1.57 7.77 1.33 9.3 1.54 12.38 1.54 4.05 0 7.43-.88 10.68-2.95 5.06-3.22 8.11-4.67 11.2-5.2 3.62-.64 4.77-.46 16.55 2.06 17.26 3.7 30.85 1.36 41.06-9.7 5.1-5.53 5.48-8.9 3.48-14.8-.83-2.42-1.03-3.1-1.17-4.3-.29-2.52.5-4.71 2.71-6.93 2.65-2.65 4.72-9.17 6.22-18.29h2.03c-1.56 9.71-3.77 16.65-6.83 19.7-1.79 1.8-2.36 3.39-2.14 5.28.11 1 .3 1.63 1.07 3.9 2.22 6.53 1.76 10.66-3.9 16.8-10.77 11.66-25.07 14.13-42.95 10.3-11.42-2.45-12.55-2.62-15.78-2.06-2.77.48-5.62 1.84-10.47 4.92a20.93 20.93 0 0 1-11.76 3.27c-3.25 0-4.81-.22-12.73-1.57C212.74 59.46 208.73 59 204 59c-9.1 0-15.11-.1-22.07-.5-8.09-.47-14.62-1.29-19.2-2.54-5.62-1.53-10.17 1.38-17.85 9.66-5.5 5.94-6.08 6.53-8.28 8.18a70.38 70.38 0 0 1-9.38 6.03c-.68.37-2.58 1.35-2.84 1.49-.84.44-1.35.76-1.75 1.08-1.47 1.2-1.63 2.4-1.63 11.6 0 1.85.06 3.54.17 5.44 0 .17.2 3.28.24 4.03.1 2.75-.13 4.29-1.08 5.71-1.67 2.5-2.27 2.8-18.95 9.74-5.48 2.29-8.99 4.96-11.2 8.24-1.71 2.51-2.47 4.73-3.64 9.7-.83 3.5-1.21 4.92-1.94 6.83-2.18 5.73-6.05 10.19-13.1 14.18-7.3 4.12-13.55 12.28-19.46 24.66-4.6 
9.64-8.17 19.46-14.67 39.32l-1.58 4.84c-4.75 14.47-7.54 22.48-10.86 30.69-5.28 13.01-13.95 25.65-24.93 37.6v-2.97zm0 78v-.5l1-.01c6.32 0 7.47 5.2 4.6 13.36a60.36 60.36 0 0 1-5.6 11.3v-1.92a57.76 57.76 0 0 0 4.65-9.72c2.69-7.6 1.71-12.02-3.65-12.02-.34 0-.67 0-1 .02v-46.59a340.96 340.96 0 0 0 13.71-8.34c13.66-9.46 29.79-37.6 29.79-53.59 0-18.1 21.57-72.64 32.23-79.42 12.71-8.09 32.24-27.96 35.8-37.75 1.93-5.3 5.5-7.27 14.42-9.37 6.15-1.44 8.64-2.42 10.67-4.79 1.5-1.74 2.72-4.79 4.33-10.3.23-.78 1.9-6.68 2.43-8.46 3.62-12.08 7.3-18.49 13.47-20.39 2.5-.76 3.03-.98 9.74-3.7 7.49-3.03 11.97-4.43 17.12-4.92 6.75-.65 13.13.75 19.55 4.67 5.43 3.32 12.19 4.72 20.17 4.56 6.03-.12 12.2-1.07 19.83-2.8 1.82-.4 7.38-1.74 8.26-1.94 2.69-.6 4.34-.89 5.48-.89 4.97 0 8.93-.05 14.2-.27 7.9-.32 15.56-.92 22.75-1.88 8.5-1.14 15.9-2.73 21.88-4.82 18.9-6.62 32.64-18.3 33.67-27.59.29-2.56.4-2.96 2.79-11.11 2.33-7.95 3.21-12.93 2.72-18.23-.2-2.24-.69-4.38-1.48-6.42-1.5-3.92-2.63-9.4-3.43-16.18h.9c.77 6.47 1.89 11.72 3.47 15.82a24.93 24.93 0 0 1 1.54 6.69c.5 5.46-.4 10.54-2.77 18.6-2.36 8.06-2.47 8.47-2.74 10.95-1.09 9.75-15.1 21.68-34.33 28.41-6.06 2.12-13.52 3.72-22.09 4.87-7.22.96-14.92 1.57-22.83 1.89-5.3.21-9.27.27-14.25.27-1.04 0-2.64.27-5.26.87-.87.2-6.43 1.53-8.26 1.94-7.68 1.73-13.92 2.7-20.03 2.82-8.15.17-15.1-1.27-20.71-4.7-6.23-3.81-12.4-5.16-18.93-4.54-5.04.48-9.44 1.86-16.84 4.86-6.75 2.74-7.29 2.95-9.82 3.73-5.73 1.76-9.28 7.96-12.81 19.72-.53 1.77-2.2 7.66-2.43 8.46-1.66 5.65-2.91 8.78-4.53 10.67-2.22 2.58-4.84 3.62-12.01 5.3-7.8 1.83-11.13 3.66-12.9 8.54-3.65 10.04-23.32 30.06-36.2 38.25C65.94 190 44.5 244.2 44.5 262c0 16.34-16.3 44.78-30.22 54.41-2.14 1.48-8.24 5.12-14.28 8.68v-1.16 46.09zm0-173.7v-1.11c7.42-3.82 14.55-10.23 21.84-18.98 3.8-4.56 14.21-18.78 15.79-20.55 1.8-2.04 4.06-3.96 7.42-6.45 1.08-.8 4.92-3.57 5.49-3.99 9.36-6.85 14-11.96 15.98-19.36.8-2.98 1.54-6.78 2.46-12.3.23-1.44 2-12.46 2.56-15.79 2.87-16.77 5.73-26.79 10.07-32.1C92.46 52.43 101.5 38.13 101.5 33c0-2.54.34-3.35 6.05-15.71.68-1.49 1.25-2.74 1.77-3.93 2.5-5.75 3.9-10.04 4.14-13.36h1c-.23 3.48-1.66 7.87-4.23 13.76-.52 1.2-1.09 2.45-1.78 3.95-5.54 12.01-5.95 12.99-5.95 15.29 0 5.47-9.09 19.84-20.11 33.31-4.2 5.12-7.03 15.06-9.86 31.64-.57 3.33-2.33 14.33-2.57 15.78-.92 5.56-1.67 9.38-2.48 12.4-2.05 7.68-6.82 12.93-16.35 19.91l-5.49 3.98c-3.3 2.45-5.51 4.34-7.27 6.31-1.53 1.73-11.94 15.93-15.76 20.53-7.52 9.02-14.88 15.6-22.61 19.46zm0 361.83v-4.33c.48 2.36 1 4.35 1.6 6.15 2 6.03 4.6 8.26 8.19 6.59C28.76 557.69 43.5 542.4 43.5 527c0-16.2 6.37-31.99 17.1-46.3 1.88-2.5 3.66-4.4 5.53-6 .73-.62 1.45-1.18 2.3-1.8l2-1.43c3.68-2.68 5.32-5.28 7.08-12.59.75-3.07 1.38-5.02 4.2-13.26l.63-1.88c3.24-9.58 4.56-14.97 4.17-18.65-.48-4.43-3.8-5.23-11.3-1.64a81.12 81.12 0 0 1-9.15 3.7c-13.89 4.67-26.96 5.8-42.66 5.42l-1.95-.05-1.45-.02a39.8 39.8 0 0 0-15.05 2.96A21.81 21.81 0 0 0 0 438.37v-1.26a23.55 23.55 0 0 1 4.55-2.57 40.77 40.77 0 0 1 16.92-3.02l1.95.05c15.6.38 28.57-.75 42.32-5.37a80.12 80.12 0 0 0 9.04-3.65c8.04-3.84 12.16-2.85 12.72 2.43.42 3.89-.92 9.34-4.21 19.08l-.64 1.88c-2.8 8.2-3.43 10.15-4.16 13.18-1.82 7.52-3.59 10.34-7.47 13.16l-2 1.43c-.84.6-1.54 1.15-2.25 1.75a35.45 35.45 0 0 0-5.37 5.84c-10.61 14.15-16.9 29.74-16.9 45.7 0 15.88-15 31.45-34.29 40.45-4.3 2.01-7.39-.66-9.56-7.18-.23-.68-.44-1.39-.65-2.13zm0-62.16v-2.45l1.46 3.27c2.1 4.8 3.46 10.33 4.26 16.77.66 5.3.84 9.3 1.04 18.5.2 9.32.5 12.75 1.63 15.05 1.28 2.6 3.67 2.35 8.29-1.5 17.14-14.3 21.82-22.9 21.82-38.62 0-7.17 1.1-12.39 3.7-17.68 2.27-4.67 3.65-6.62 
13.4-19.62a69.8 69.8 0 0 1 7.6-8.79 44.76 44.76 0 0 1 3.54-3.06c.38-.3.64-.52.89-.74a10.47 10.47 0 0 0 2.63-3.32 35.78 35.78 0 0 0 2.26-5.94l.37-1.2.36-1.15c.29-.91.48-1.55.66-2.16.45-1.53.74-2.68.91-3.66.38-2.2.12-3.49-.85-4.15-2.35-1.61-9.28-.24-23.8 4.94-9.54 3.4-16.12 4.17-27.85 4.26-7.71.06-10.43.4-13.25 2.12-3.48 2.12-5.84 6.4-7.58 14.26-.5 2.2-.99 4.19-1.49 5.98v-3.98l.51-2.22c1.8-8.1 4.28-12.6 8.04-14.9 3.04-1.85 5.86-2.2 13.77-2.26 11.61-.09 18.1-.84 27.51-4.2 14.93-5.32 21.95-6.71 24.7-4.83 1.38.94 1.71 2.6 1.28 5.15a33.69 33.69 0 0 1-.94 3.78l-.66 2.17-.36 1.15-.37 1.2a36.64 36.64 0 0 1-2.33 6.1c-.8 1.53-1.61 2.52-2.86 3.61l-.92.77-1.02.83c-.9.74-1.65 1.4-2.47 2.18a68.84 68.84 0 0 0-7.48 8.66c-9.7 12.93-11.07 14.87-13.31 19.46-2.52 5.15-3.59 10.22-3.59 17.24 0 16.04-4.82 24.91-22.18 39.38-5.04 4.2-8.18 4.55-9.83 1.18-1.22-2.5-1.52-5.94-1.73-15.47-.2-9.16-.38-13.15-1.03-18.4-.79-6.34-2.12-11.8-4.19-16.49L0 495.98zM379.27 0h1.04l1.5 5.26c3.28 11.56 4.89 19.33 5.26 27.8.49 11.01-1.52 21.26-6.63 31.17-7.8 15.13-20.47 26.5-36.22 34.1-12.38 5.96-26.12 9.17-36.22 9.17-6.84 0-17.24 1.38-37.27 4.62l-2.27.37c-24.5 3.99-31.65 5-37.46 5-3.49 0-4.08-.08-19.54-2.8-3.56-.64-6.32-1.1-9-1.5-20.23-2.96-31-1.2-31.96 7.86-.1.85-.18 1.72-.29 2.81l-.27 2.73c-1.1 10.9-2.02 15.73-4.31 19.96-2.9 5.34-7.77 7.95-15.63 7.95-10.2 0-12.92.6-15.5 3.17.52-.51-5.03 5.85-8.16 8.7-2.75 2.5-14.32 12.55-15.77 13.83a341.27 341.27 0 0 0-6.54 5.92c-6.97 6.49-11.81 11.76-14.6 16.15-5.92 9.3-10.48 18.04-11.69 24.08-1.66 8.3 3.67 9.54 19.02 1.21a626.23 626.23 0 0 1 44.54-21.9c3.5-1.56 14.04-6.2 15.68-6.95 5.05-2.25 8.3-3.8 10.78-5.15l1.95-1.07 2.18-1.18c1.76-.94 3.38-1.76 5-2.55 18.1-8.72 34.48-10.46 50.33-1.2 22.89 13.34 38.28 37.02 38.28 56.44 0 19.12-.73 25.13-5.18 33.2a45.32 45.32 0 0 1-4.94 7.12c-6.47 7.77-11.81 16.2-12.76 21.27-1.2 6.34 4.69 7.03 20.17-.05 13.31-6.08 22.4-14.95 28.5-26.32a80.51 80.51 0 0 0 6.1-15.13c.9-2.98 3.17-11.65 3.41-12.48a29.02 29.02 0 0 1 1.75-4.83c7.47-14.93 21.09-30.5 36.25-37.24 7.61-3.38 13-9.65 19.4-20.79.84-1.48 4.26-7.64 5.14-9.17 3.52-6.1 6.22-9.7 9.37-11.98 10.15-7.4 28.7-11.1 50.29-11.1 7.52 0 16.54-1.24 27.51-3.58a420.1 420.1 0 0 0 14.96-3.52c-1.3.33 15.54-3.98 19.42-4.89 14.15-3.33 41.07-5.01 64.11-5.01 17.36 0 27.82-9.23 38.53-38.67 6.62-18.21 6.62-26.37 2.69-34.35l-1.18-2.37A13.36 13.36 0 0 1 587.5 58c0-4.03 0-4.01 2.5-24.56.46-3.73.8-6.74 1.12-9.64.9-8.45 1.38-15.2 1.38-20.8 0-.94-.02-1.94-.04-3h1c.03 1.06.04 2.06.04 3 0 5.65-.48 12.43-1.39 20.9-.3 2.91-.66 5.93-1.11 9.66-2.5 20.45-2.5 20.47-2.5 24.44 0 1.97.45 3.57 1.45 5.68.24.51 1.16 2.35 1.17 2.36 4.06 8.24 4.06 16.68-2.65 35.13-10.84 29.8-21.63 39.33-39.47 39.33-22.96 0-49.83 1.68-63.89 4.99-3.86.9-20.69 5.2-19.4 4.88a421.05 421.05 0 0 1-14.99 3.53c-11.04 2.35-20.11 3.6-27.72 3.6-21.4 0-39.76 3.67-49.7 10.9-3 2.19-5.64 5.7-9.1 11.68-.87 1.52-4.29 7.68-5.14 9.17-6.49 11.3-12 17.71-19.86 21.2-14.9 6.63-28.38 22.03-35.75 36.77a28.17 28.17 0 0 0-1.69 4.67c-.23.8-2.5 9.49-3.4 12.5a81.48 81.48 0 0 1-6.19 15.3c-6.2 11.56-15.44 20.58-28.96 26.76-16.1 7.36-23 6.55-21.58-1.04 1-5.29 6.4-13.83 12.99-21.73a44.33 44.33 0 0 0 4.82-6.96c4.35-7.88 5.06-13.77 5.06-32.72 0-19.04-15.19-42.4-37.72-55.55-15.57-9.08-31.62-7.38-49.45 1.21a132.9 132.9 0 0 0-7.14 3.71l-1.95 1.07a158.83 158.83 0 0 1-10.85 5.19c-1.65.74-12.18 5.38-15.69 6.95a625.25 625.25 0 0 0-44.46 21.86c-15.95 8.66-22.37 7.16-20.48-2.29 1.24-6.2 5.83-15.02 11.82-24.42 2.85-4.48 7.74-9.8 14.77-16.34 1.98-1.85 4.12-3.79 6.56-5.94 1.46-1.29 13.02-11.33 15.75-13.82 3.09-2.8 
8.6-9.14 8.14-8.67 2.82-2.82 5.75-3.46 16.2-3.46 7.5 0 12.04-2.43 14.75-7.42 2.2-4.07 3.11-8.84 4.2-19.59l.26-2.73.3-2.81c.56-5.42 4.47-8.5 11.23-9.6 5.44-.88 12.51-.51 21.86.86 2.7.4 5.47.86 9.04 1.49 15.33 2.7 15.96 2.8 19.36 2.8 5.73 0 12.9-1.03 37.3-5l2.27-.36c20.1-3.26 30.52-4.64 37.43-4.64 9.95 0 23.54-3.18 35.78-9.08 15.57-7.5 28.09-18.73 35.78-33.65 5.02-9.75 7-19.82 6.51-30.67-.37-8.37-1.96-16.08-5.23-27.57L379.27 0zm13.68 0h1.02c.78 3.9 1.92 8.7 3.51 14.88 3.63 14.05 3.06 27.03-.75 38.77a61 61 0 0 1-11.35 20.68 138.36 138.36 0 0 1-19.32 18.77c-11.32 9.02-23.36 15.49-35.95 18.39a258.63 258.63 0 0 1-22.57 4.07c-3.17.44-6.36.85-10.3 1.32l-9.39 1.12c-11.53 1.41-17.45 2.55-21.64 4.46-9.28 4.21-28.35 6.04-49.21 6.04-1.37 0-2.8-.12-4.3-.35-2.62-.41-5-1.03-9.14-2.29-7.34-2.21-9.63-2.75-12.63-2.56-3.9.23-6.63 2.29-8.47 6.89-1.86 4.66-2.42 7.53-3.34 14.98-1.1 8.98-2.87 12.12-9.97 14.3a40.12 40.12 0 0 0-6.8 2.66c-.63.33-1.16.64-1.76 1.02l-1.34.86c-1.9 1.14-3.86 1.49-9.25 1.49-3.2 0-8.83-.55-9.51-.39-1.22.28-.75-.14-7.14 6.24-1.5 1.5-3.49 3.18-6.32 5.37-1.52 1.18-7.16 5.43-7.94 6.03-4.96 3.78-8.33 6.6-11.06 9.38-4.88 4.98-6.85 9.15-5.56 12.7 1.34 3.67 4.07 4.42 8.9 2.82a55.72 55.72 0 0 0 7.77-3.48c1.5-.77 7.78-4.13 9.37-4.96a116.8 116.8 0 0 1 12.31-5.68 162.2 162.2 0 0 0 11.04-4.84c2.04-.97 10.74-5.16 13-6.22 4.41-2.1 8.1-3.78 11.65-5.29 17.14-7.3 29.32-9.9 37.67-6.65l5.43 2.1c2.3.88 4.17 1.62 6.02 2.38a150.9 150.9 0 0 1 13.07 6c18.34 9.63 30.35 22.13 34.79 39.87 6.96 27.85 3.6 45.53-8.08 62.4-3.97 5.75-3.52 9.2.06 8.97 4.14-.28 10.21-4.95 15.11-12.52 3.1-4.8 5.1-10.45 8.05-21.53l1.69-6.35c.66-2.47 1.24-4.52 1.83-6.5 4.93-16.56 11-27.28 21.56-34.76 7.15-5.06 23.73-15.5 25.48-16.75 6.74-4.81 10.53-9.44 14.34-18 7.74-17.44 21.09-24.34 44.47-24.34 9.36 0 17.91-1.13 29.53-3.49a624.86 624.86 0 0 0 6.2-1.28c2.4-.5 4.07-.84 5.66-1.13 4.03-.74 7.04-1.1 9.61-1.1 4.44 0 9.39-1 31.39-5.99l2.95-.66c16.34-3.67 25.64-5.35 31.66-5.35 1.54 0 2.4.01 6.4.1 7.8.15 12.27.13 17.33-.2 16.41-1.06 26.73-5.36 29.8-14.56a87.1 87.1 0 0 1 3.55-8.83c-.15.31 2.29-4.96 2.9-6.38 5.38-12.3 5.57-21.92-1.44-39.44a86.4 86.4 0 0 1-5.26-20.72c-1.61-11.98-1.38-23.14.1-40.35l.2-2.12h1l-.2 2.2c-1.48 17.15-1.7 28.24-.11 40.14a85.4 85.4 0 0 0 5.2 20.47c7.1 17.78 6.91 27.67 1.43 40.22-.62 1.43-3.06 6.72-2.91 6.4a86.17 86.17 0 0 0-3.52 8.73c-3.23 9.72-13.9 14.15-30.68 15.24-5.1.33-9.58.35-17.42.2-3.98-.09-4.84-.1-6.37-.1-5.91 0-15.18 1.67-31.44 5.32l-2.95.67c-22.16 5.02-27.05 6.01-31.61 6.01-2.5 0-5.45.36-9.43 1.09-1.58.29-3.25.62-5.64 1.11a4894.21 4894.21 0 0 0-6.2 1.29c-11.68 2.37-20.3 3.51-29.73 3.51-23.02 0-36 6.71-43.53 23.66-3.9 8.8-7.82 13.58-14.7 18.5-1.78 1.27-18.36 11.7-25.48 16.75-10.34 7.32-16.3 17.87-21.19 34.23-.58 1.96-1.15 4-1.82 6.47l-1.69 6.35c-2.98 11.18-5 16.9-8.17 21.81-5.05 7.81-11.37 12.68-15.89 12.98-4.7.31-5.3-4.23-.94-10.53 11.52-16.64 14.82-34.03 7.92-61.6-4.35-17.42-16.16-29.72-34.27-39.22-4-2.1-8.2-4-12.99-5.97-1.84-.75-3.7-1.49-6-2.38l-5.43-2.08c-8.03-3.12-20.02-.58-36.92 6.63-3.52 1.5-7.21 3.19-11.61 5.27l-13 6.22c-4.71 2.22-8.16 3.75-11.11 4.88a115.87 115.87 0 0 0-12.21 5.63c-1.58.83-7.86 4.18-9.37 4.96a56.55 56.55 0 0 1-7.9 3.54c-5.3 1.75-8.62.85-10.17-3.43-1.46-4.02.66-8.5 5.8-13.74 2.75-2.82 6.16-5.66 11.15-9.48.79-.6 6.43-4.85 7.94-6.02a66.96 66.96 0 0 0 6.23-5.28c6.74-6.74 6.1-6.16 7.61-6.51.87-.2 6.69.36 9.74.36 5.22 0 7.03-.32 8.74-1.35l1.31-.84c.62-.4 1.18-.72 1.84-1.07a41.07 41.07 0 0 1 6.96-2.72c6.64-2.04 8.22-4.84 9.28-13.47.93-7.53 1.5-10.47 3.4-15.24 1.99-4.95 5.04-7.26 9.34-7.51 3.17-.2 
5.5.35 12.97 2.6a63.54 63.54 0 0 0 9.02 2.26c1.45.22 2.83.34 4.14.34 20.71 0 39.7-1.82 48.8-5.96 4.32-1.96 10.29-3.1 21.93-4.53l9.4-1.12c3.92-.48 7.11-.88 10.27-1.32 8.16-1.14 15.4-2.43 22.49-4.06 12.42-2.86 24.33-9.26 35.55-18.2a137.4 137.4 0 0 0 19.18-18.64 60.02 60.02 0 0 0 11.15-20.32c3.76-11.57 4.32-24.36.75-38.23A284.86 284.86 0 0 1 392.95 0zM506.7 0h1.26c-.5.66-.9 1.18-1.17 1.51-3.95 4.96-6.9 7.92-9.82 9.57A10.02 10.02 0 0 1 492 12.5c-2.38 0-4.24.67-6.71 2.21l-2.65 1.71c-4.38 2.8-8.01 4.08-13.64 4.08-5.6 0-9.99-1.26-16.08-4.05a202.63 202.63 0 0 1-2.3-1.06l-2.18-.98c-1.6-.7-2.92-1.17-4.17-1.48a13.42 13.42 0 0 0-3.27-.43c-2.3 0-4.3-.68-11-3.37l-1.56-.62c-5-1.97-8.1-2.82-10.52-2.66-2.93.2-4.42 2.03-4.42 6.15 0 20.76-5.21 50.42-12.15 57.35-7.58 7.59-26.55 23.7-34.06 29.06-13.16 9.4-31.17 20.2-44.11 25.06a106.87 106.87 0 0 1-13.32 4.03c-3.28.78-6.6 1.43-11.25 2.24-.53.1-8.8 1.5-11.5 1.99-4.86.87-9.3 1.74-14 2.76-20.62 4.48-25.07 5.01-38.11 5.01-2.49 0-2.9-.07-14.05-2-2.42-.42-4.31-.73-6.15-1-8.11-1.19-13.83-1.36-17.64-.2-4.54 1.4-5.93 4.65-3.7 10.52 2.02 5.28 4.84 8.61 8.84 10.74 3.26 1.74 6.75 2.6 13.82 3.71 9.42 1.48 10.94 1.75 15.5 2.92a78.2 78.2 0 0 1 18.62 7.37c8.3 4.58 14.58 11.5 19.98 20.89 2.73 4.73 9.46 19.33 10.54 21.19 3.4 5.85 6.26 6.63 10.89 2 4.95-4.94 10.35-8.37 21.13-14.06.47-.25 2.06-1.1 2.12-1.12 7.98-4.21 11.92-6.51 15.87-9.54 5.11-3.9 8.66-8.1 10.77-13.11 8.52-20.24 20.75-33.31 32.46-33.31l5.5.03c10.53.08 17.35.02 24.9-.31 13.66-.62 23.78-2.09 29.39-4.67 5.85-2.7 13.42-5.49 24.18-9.02 3.46-1.14 6.29-2.05 12.7-4.1 7.7-2.45 11.08-3.54 15.17-4.9a1059.43 1059.43 0 0 1 11.33-3.72c3.67-1.2 5.96-2 8.03-2.78a59.88 59.88 0 0 0 6.66-2.94c1.87-.98 3.76-2.1 5.86-3.5 3.48-2.33 6.15-3.13 12.04-4.13l1.15-.2c5.71-1.01 9-2.3 12.76-5.63 7.82-6.96 8.58-23.18 3.84-44.52-1.7-7.67-2.1-19.28-1.57-35.47A837.22 837.22 0 0 1 546.76 0h1l-.15 3.06c-.32 6.42-.53 11.02-.68 15.62-.51 16.1-.12 27.65 1.56 35.21 4.82 21.68 4.04 38.2-4.16 45.48-3.91 3.48-7.37 4.84-13.24 5.87l-1.16.2c-5.76.99-8.32 1.75-11.65 3.98a63.73 63.73 0 0 1-5.96 3.56 60.86 60.86 0 0 1-6.77 2.99c-2.09.79-4.39 1.58-8.07 2.79a5398.31 5398.31 0 0 1-11.32 3.71c-4.1 1.37-7.48 2.46-15.18 4.92-6.42 2.04-9.24 2.95-12.7 4.08-10.73 3.53-18.27 6.3-24.07 8.98-5.76 2.66-15.97 4.14-29.77 4.77-7.56.33-14.4.39-24.95.31l-5.49-.03c-11.19 0-23.16 12.79-31.54 32.7-2.19 5.19-5.84 9.52-11.08 13.52-4.02 3.07-7.99 5.39-16.01 9.62l-2.12 1.12c-10.7 5.65-16.04 9.04-20.9 13.9-5.14 5.14-8.75 4.15-12.45-2.22-1.12-1.92-7.85-16.5-10.54-21.2-5.33-9.24-11.48-16.02-19.6-20.5a77.2 77.2 0 0 0-18.4-7.28c-4.5-1.17-6.02-1.43-15.4-2.9-7.17-1.12-10.74-2-14.13-3.81-4.22-2.25-7.2-5.77-9.3-11.27-2.43-6.39-.78-10.26 4.34-11.83 4-1.22 9.82-1.05 18.08.17 1.84.27 3.74.58 6.17 1 11.02 1.9 11.48 1.98 13.88 1.98 12.96 0 17.35-.52 37.9-4.99 4.71-1.02 9.16-1.9 14.03-2.77 2.71-.48 10.98-1.9 11.5-1.98 4.64-.81 7.95-1.46 11.2-2.23 4.55-1.07 8.76-2.34 13.2-4 12.83-4.81 30.79-15.59 43.88-24.94 7.47-5.33 26.4-21.4 33.94-28.94C407.3 61.98 412.5 32.49 412.5 12c0-4.61 1.86-6.9 5.35-7.15 2.63-.18 5.8.7 10.96 2.73l1.56.62c6.53 2.62 8.53 3.3 10.63 3.3 1.14 0 2.3.16 3.5.46 1.32.33 2.68.82 4.34 1.53a90.97 90.97 0 0 1 3.34 1.52l1.15.54c5.98 2.73 10.23 3.95 15.67 3.95 5.41 0 8.87-1.21 13.1-3.92.2-.13 2.1-1.38 2.66-1.72 2.62-1.63 4.64-2.36 7.24-2.36 1.47 0 2.94-.43 4.47-1.3 2.78-1.56 5.67-4.45 9.54-9.31l.7-.89zM324.54 600h-2.03c.49-2.96.91-6.2 1.28-9.66.44-4.1.76-8.25.98-12.21.08-1.39.14-2.65-.35-7.29-.47-1.94-.93-4.14-1.36-6.54-2.01-11.26-2.66-22.9-1.14-33.78a60.76 60.76 0 0 1 5.18-17.95 70.78 70.78 
0 0 1 12.6-18.22c3.38-3.6 5.53-5.5 11.83-10.79 4.5-3.78 6.35-5.56 7.52-7.5.64-1.07.95-2.06.95-3.06 0-1.75 0-1.74-.75-9.23-.36-3.7-.57-6.3-.68-8.96-.5-12.1 1.62-19.6 8.11-21.76 15.9-5.3 25.89-12.1 33.45-25.54C409.6 390.65 425.85 376 436 376c12.36 0 20-1.96 29.41-8.8 6.76-4.92 9.5-6.6 12.47-7.46 2.22-.64 3.8-.74 9.12-.74 1.86 0 3.53-.83 5.57-2.62 1.08-.96 5.11-5.12 5.6-5.6 6.04-5.85 11.98-8.78 20.83-8.78 2.45 0 4.54.04 7.32.12 7.51.23 8.87.17 11.27-.7 3.03-1.1 5.53-3.03 14.75-11.17 8-7.06 10.72-8.92 22.87-16.47 1.44-.9 2.59-1.63 3.69-2.37a69.45 69.45 0 0 0 9.46-7.5c4.12-3.88 8.02-7.85 11.64-11.9v2.98a201.58 201.58 0 0 1-10.27 10.38c-3.18 3-6.2 5.35-9.72 7.7-1.12.76-2.28 1.5-3.75 2.4-12.05 7.5-14.71 9.32-22.6 16.28-9.46 8.35-12.01 10.32-15.39 11.55-2.74 1-4.19 1.06-12.01.82-2.76-.08-4.83-.12-7.26-.12-8.27 0-13.75 2.7-19.43 8.22-.44.43-4.52 4.64-5.68 5.66-2.37 2.09-4.46 3.12-6.89 3.12-5.1 0-6.6.1-8.56.66-2.67.78-5.29 2.37-11.85 7.15-9.8 7.13-17.85 9.19-30.59 9.19-9.22 0-24.96 14.2-34.13 30.49-7.84 13.94-18.24 21.02-34.55 26.46-5.31 1.77-7.21 8.51-6.75 19.78.1 2.6.31 5.19.68 8.84.75 7.62.75 7.58.75 9.43 0 1.38-.42 2.73-1.24 4.09-1.33 2.2-3.26 4.07-7.94 8-6.25 5.24-8.36 7.12-11.67 10.63a68.8 68.8 0 0 0-12.25 17.71 58.8 58.8 0 0 0-5 17.36c-1.49 10.66-.85 22.09 1.13 33.15.43 2.37.88 4.53 1.33 6.44.16.66.3 1.25.6 4.06a249.3 249.3 0 0 1-1.17 16.12c-.37 3.37-.78 6.53-1.25 9.44zm-13.4 0h-1.05l.12-.28c3.07-7.16 4.29-11.83 4.29-18.72 0-3.57-.07-4.93-.76-15.65-.77-12.04-1-19.64-.55-28.3.58-11.5 2.4-22.1 5.81-32.16 1.3-3.8 2.8-7.5 4.55-11.1 3.46-7.14 6.83-12.39 10.42-16.6a59.02 59.02 0 0 1 4.35-4.56c.43-.4 3-2.8 3.67-3.45 5.72-5.6 7.51-11.52 7.51-29.18 0-18.84 2.9-23.77 15.82-28.24 1.09-.37 1.92-.67 2.77-.98a51.3 51.3 0 0 0 6.1-2.7c4.95-2.6 9.64-6.22 14.44-11.42 25.5-27.63 37.15-35.16 56.37-35.16 8.28 0 14.54-1.95 22-6.3 1.78-1.03 13.82-8.82 18.16-11.27 2.83-1.59 5.66-3.03 8.63-4.39 7.92-3.6 13.97-4.45 26.6-4.8 7.53-.2 10.7-.49 14.26-1.58 4.55-1.4 8.06-4 10.93-8.43 2.2-3.41 6.85-7.08 14.66-12.06 1.61-1.03 3.27-2.05 5.65-3.5 9.53-5.85 11.56-7.13 14.81-9.57 5.34-4 9.3-8.37 13.68-14.77a204.2 204.2 0 0 0 5.62-8.75v1.9c-1.97 3.17-3.4 5.38-4.8 7.42-4.42 6.48-8.46 10.92-13.9 15-3.29 2.46-5.32 3.75-14.89 9.61a375.06 375.06 0 0 0-5.63 3.5c-7.7 4.9-12.26 8.52-14.36 11.76-3 4.63-6.7 7.39-11.48 8.85-3.68 1.12-6.9 1.42-14.53 1.63-12.5.34-18.44 1.18-26.2 4.7a111.08 111.08 0 0 0-8.56 4.35c-4.3 2.43-16.34 10.22-18.15 11.27-7.6 4.43-14.03 6.43-22.5 6.43-18.87 0-30.3 7.4-55.63 34.84-4.88 5.28-9.67 8.97-14.7 11.62-2 1.05-4 1.92-6.23 2.75-.86.32-1.7.62-5.37 1.87-5.08 1.76-7.44 3.25-9.28 6.37-2.23 3.78-3.29 9.94-3.29 20.05 0 17.9-1.87 24.07-7.8 29.89-.69.67-3.27 3.06-3.69 3.46a58.04 58.04 0 0 0-4.28 4.49c-3.53 4.14-6.86 9.32-10.28 16.38a95.19 95.19 0 0 0-4.5 10.99c-3.38 9.97-5.18 20.48-5.76 31.9-.44 8.6-.22 16.17.55 28.17.69 10.76.76 12.12.76 15.72 0 6.35-1.02 10.87-4.35 19zm25.08 0h-1c-.04-4.73.06-9.39.28-15.02.26-6.41-.4-11.79-2.53-24.37l-.31-1.86c-2.12-12.55-2.76-19.35-1.97-26.47 1.03-9.25 4.75-16.68 12-22.67 22.04-18.2 29.81-30.18 29.81-44.61 0-2.6-.3-4.81-.98-8.17-.97-4.79-1.1-5.68-.97-7.57.2-2.56 1.27-4.7 3.56-6.72 2.67-2.35 7.05-4.6 13.72-7.01 9.72-3.5 15.52-9.18 24.3-21.57l1.78-2.5c4.48-6.33 7.1-9.63 10.43-12.78 4.31-4.07 8.98-6.77 14.54-8.17 13.3-3.32 20.37-5.47 25.34-7.64a49.5 49.5 0 0 0 5.28-2.7c1.1-.65 1.75-1.04 4.24-2.6 2.7-1.68 5.22-2.08 11.38-2.28 5.44-.18 7.9-.43 10.97-1.41a21.47 21.47 0 0 0 9.54-6.22c4.87-5.3 10.03-7.61 17.79-8.9 1.07-.18 1.88-.3 3.86-.58 6.9-.97 9.94-1.69 13.48-3.62 4.5-2.45 6.79-4.44 
23.46-19.68l3.14-2.85c9.65-8.71 16.12-13.83 21.42-16.48 4.25-2.12 7.6-4.69 11.22-8.6v1.45c-3.42 3.57-6.69 6-10.78 8.05-5.18 2.59-11.61 7.67-21.2 16.32l-3.12 2.85c-16.8 15.35-19.05 17.3-23.66 19.82-3.68 2-6.8 2.75-13.82 3.73-1.97.28-2.78.4-3.84.57-7.56 1.26-12.52 3.48-17.21 8.6a22.47 22.47 0 0 1-9.97 6.5c-3.2 1-5.72 1.27-11.25 1.45-5.98.2-8.39.57-10.89 2.13a144 144 0 0 1-4.25 2.61 50.48 50.48 0 0 1-5.39 2.75c-5.04 2.2-12.15 4.37-25.5 7.7-9.74 2.44-15.26 7.65-24.4 20.56l-1.77 2.5c-8.9 12.54-14.82 18.34-24.78 21.93-6.57 2.36-10.85 4.57-13.4 6.82-2.1 1.86-3.05 3.74-3.22 6.04-.13 1.76 0 2.63.95 7.3.7 3.42 1 5.7 1 8.37 0 14.79-7.93 27-30.18 45.39-7.03 5.8-10.64 13-11.64 22-.78 7-.14 13.73 1.96 26.2l.32 1.85c2.15 12.65 2.8 18.07 2.54 24.58-.22 5.57-.32 10.2-.28 14.98zM95.9 600h-2.04c.68-3.82 1.14-8.8 1.61-15.98.2-3.11.27-4.06.39-5.6 1.3-17.54 4.04-27.14 11.5-33.2 4.65-3.77 7.22-8.92 8.67-16 .51-2.52.7-3.87 1.33-9.17.66-5.5 1.16-8.06 2.24-10.36 1.45-3.09 3.82-4.69 7.39-4.69 14.28 0 38.48 9.12 53.6 20.2 8.66 6.35 21.26 13.32 31.74 17.11 13.03 4.71 21.89 4.41 24.75-1.73 1.7-3.64 1.92-4.11 2.65-5.77 2.93-6.67 4.69-12.2 5.25-17.5.23-2.17.24-4.23.02-6.2-.32-2.75-1.42-4.55-4.08-7.35l-1.32-1.37a30.59 30.59 0 0 1-2.41-2.79 30.37 30.37 0 0 1-2.5-4.07l-1.13-2.14c-1.62-3.1-2.68-4.6-4.12-5.56-5.26-3.5-14.8-5.5-28.55-6.83a272.42 272.42 0 0 0-9.04-.71l-2.18-.17c-9.57-.73-15.12-1.56-19.06-3.2C156.57 471.07 136 450.5 136 440c0-5.34 1.74-9.53 5.47-14.13 1.98-2.44 11.12-11.71 12.79-13.54 4.52-4.97 10.16-9.54 17.68-14.66 2.8-1.9 14.78-9.6 17.49-11.49a50.54 50.54 0 0 0 6.34-5.43c1.53-1.5 6.96-7.13 7.12-7.3 7.18-7.3 12.7-11.56 19.74-14.38 3.36-1.34 8.13-2.79 17.45-5.38a9577.18 9577.18 0 0 1 11.78-3.28 602.6 602.6 0 0 0 12.67-3.7c20.4-6.24 34-12.08 40.79-18.44 8.74-8.2 11.78-13.84 15.73-26.02 2.02-6.22 3.09-9.04 5.07-12.72 9.54-17.71 28.71-39.37 43.5-45.45C383.77 238.25 389 232.34 389 226c0-2.89 2.73-8.4 6.83-13.73 4.76-6.2 10.65-11.36 16.75-14.18 12.5-5.77 33.5-10.09 47.42-10.09 5.32 0 9.83-1.5 16.42-4.89 9.2-4.71 10.1-5.11 13.58-5.11 10.42 0 32.06-2.55 45.76-5.97l3.88-.98 3.47-.89c2.6-.66 4.33-1.08 5.93-1.43 3.9-.86 6.76-1.23 9.58-1.17 2.74.06 5.47.52 8.67 1.48 4.56 1.37 13.71-.9 22.87-5.68a68.07 68.07 0 0 0 9.84-6.2v2.4c-11.09 8.14-25.76 13.66-33.29 11.4a29.72 29.72 0 0 0-8.13-1.4c-2.63-.05-5.36.3-9.11 1.12a238 238 0 0 0-9.33 2.3l-3.9.99C522.38 177.43 500.58 180 490 180c-2.99 0-3.91.4-12.67 4.89-6.85 3.51-11.61 5.11-17.33 5.11-13.65 0-34.35 4.26-46.58 9.9-5.78 2.67-11.42 7.62-16 13.58-3.85 5.02-6.42 10.2-6.42 12.52 0 7.27-5.8 13.82-20.62 19.92-14.27 5.88-33.16 27.21-42.5 44.55-1.9 3.55-2.95 6.28-4.93 12.4-4.05 12.47-7.23 18.39-16.27 26.86-7.08 6.64-20.87 12.57-41.57 18.89a604.52 604.52 0 0 1-12.7 3.71 1495.1 1495.1 0 0 1-11.8 3.28c-9.24 2.58-13.97 4.01-17.24 5.32-6.73 2.69-12.05 6.8-19.05 13.92-.15.15-5.6 5.8-7.15 7.32a52.4 52.4 0 0 1-6.6 5.65c-2.74 1.92-14.75 9.63-17.5 11.5-7.4 5.04-12.94 9.52-17.33 14.35-1.72 1.9-10.8 11.11-12.71 13.46-3.47 4.26-5.03 8.03-5.03 12.87 0 9.5 20 29.5 33.38 35.08 3.67 1.53 9.1 2.34 18.45 3.05a586.23 586.23 0 0 0 4.34.32c3.24.23 5.07.37 6.93.55 14.08 1.37 23.82 3.4 29.45 7.17 1.82 1.2 3.02 2.91 4.8 6.29l1.11 2.13a28.55 28.55 0 0 0 2.34 3.81c.62.83 1.3 1.6 2.26 2.61.23.24 1.1 1.16 1.32 1.37 2.93 3.09 4.24 5.23 4.61 8.5.24 2.12.23 4.33-.01 6.64-.59 5.55-2.4 11.25-5.41 18.1-.74 1.67-.96 2.15-2.66 5.8-3.49 7.47-13.33 7.8-27.25 2.77-10.67-3.86-23.43-10.92-32.25-17.38C164.62 515.96 140.82 507 127 507c-5 0-6.4 3.02-7.64 13.29a99.03 99.03 0 0 1-1.36 9.33c-1.53 7.5-4.3 13.04-9.37 17.16-6.87 
5.58-9.5 14.78-10.77 31.8-.11 1.52-.18 2.47-.38 5.57-.46 7.01-.91 11.99-1.57 15.85zm8.05 0h-1.02c.29-1.41.58-2.94.9-4.59l1.05-5.62c2.5-13.3 4.2-19.92 6.68-24.05 1.7-2.84 3.68-5.5 8.05-11.03 8.21-10.36 10.88-14.55 10.88-18.71l-.02-1.69c-.02-1.78-.02-2.7.02-3.77.21-5.05 1.47-8.2 4.64-9.4 3.92-1.5 10.39.44 20.12 6.43 9.56 5.88 17.53 10.7 25.91 15.66 1.31.78 14.27 8.41 17.67 10.45a714.21 714.21 0 0 1 6.42 3.9c13.82 8.5 38.94 5.05 46.3-7.83 3.6-6.28 4.54-8.52 7.78-17.32a82.3 82.3 0 0 1 1.18-3.07 42.27 42.27 0 0 1 4.06-7.64c9.33-13.98 14.92-26.1 14.92-36.72 0-3.66.75-6.62 3.36-14.85.52-1.64.83-2.66 1.15-3.73 3.64-12.23 3.04-19.12-4.29-24a23.1 23.1 0 0 0-9.98-3.78c-7.2-.93-14.49 1.17-23.91 5.88-1.55.78-6.64 3.44-7.6 3.93a62.6 62.6 0 0 0-4.14 2.3l-4.4 2.66c-11.62 6.92-20.4 9.18-32.81 6.08-3.32-.84-6.24-1.4-13.1-2.64-13.25-2.39-18.7-3.75-23.33-6.46-6.23-3.67-7.46-9.02-2.88-16.65A93.1 93.1 0 0 1 172 415.42a157 157 0 0 1 8.32-7.66c-.07.05 6.16-5.3 7.82-6.77a85.12 85.12 0 0 0 6.5-6.33c7.7-8.46 12.78-13.36 20.08-18.57 9.94-7.1 21.4-12.36 35.18-15.58 37.03-8.64 51-12.7 58.83-17.93 8.6-5.73 21.3-24.77 36.84-54.81 5.22-10.1 12.27-18.4 21.13-25.71 5.13-4.24 9.56-7.25 17.55-12.23 7.42-4.62 9.62-6.14 11.38-8.16a21.15 21.15 0 0 0 2.95-4.87c.61-1.3 2.87-6.47 3-6.77 1.36-3 2.56-5.4 3.95-7.73 6.53-10.97 16.03-18 31.4-20.8 12.73-2.3 19.85-2.7 29.68-2.3 3.25.13 4.13.16 5.6.14 5.15-.07 9.71-1.04 16.61-3.8 20.74-8.3 38.75-12.04 59.19-12.04 3.05 0 6.03.15 10.48.48l2.09.16c12.45.96 18.08.96 25.34-.63a49.65 49.65 0 0 0 14.09-5.45v1.15a50.52 50.52 0 0 1-13.88 5.28c-7.38 1.61-13.08 1.61-25.63.65l-2.08-.16c-4.43-.33-7.39-.48-10.41-.48-20.3 0-38.2 3.72-58.81 11.96-7.01 2.8-11.7 3.8-16.97 3.88-1.5.02-2.39-.01-5.66-.14-9.76-.4-16.8-.01-29.47 2.3-15.06 2.73-24.32 9.58-30.71 20.31a72.8 72.8 0 0 0-3.9 7.63c-.12.28-2.39 5.47-3.01 6.79a22 22 0 0 1-3.1 5.1c-1.86 2.13-4.07 3.66-11.6 8.35-7.95 4.96-12.35 7.95-17.44 12.15-8.76 7.23-15.73 15.43-20.89 25.4-15.61 30.2-28.36 49.32-37.16 55.19-7.98 5.32-21.97 9.39-59.17 18.07-13.65 3.18-24.98 8.39-34.82 15.42-7.22 5.16-12.27 10.01-19.92 18.43a86.07 86.07 0 0 1-6.57 6.4c-1.67 1.48-7.91 6.83-7.84 6.77-3.27 2.84-5.8 5.16-8.26 7.62a92.1 92.1 0 0 0-14.27 18.13c-4.3 7.16-3.22 11.89 2.53 15.26 4.47 2.63 9.88 3.99 23.24 6.39a185.7 185.7 0 0 1 12.92 2.6c12.11 3.03 20.64.84 32.06-5.96l4.4-2.65c1.66-1 2.96-1.73 4.2-2.35.95-.48 6.04-3.14 7.6-3.92 9.59-4.8 17.04-6.94 24.49-5.98a24.1 24.1 0 0 1 10.4 3.93c7.82 5.21 8.45 12.52 4.7 25.13-.32 1.07-.64 2.1-1.16 3.74-2.57 8.12-3.31 11.04-3.31 14.55 0 10.88-5.66 23.14-15.08 37.28a41.28 41.28 0 0 0-3.97 7.46c-.37.9-.73 1.82-1.18 3.04-3.25 8.85-4.21 11.13-7.84 17.47-7.67 13.42-33.43 16.95-47.7 8.18a578.4 578.4 0 0 0-6.4-3.89c-3.4-2.04-16.36-9.67-17.67-10.45-8.38-4.97-16.36-9.78-25.92-15.66-9.5-5.85-15.7-7.7-19.24-6.36-2.68 1.02-3.8 3.82-4 8.51a61.12 61.12 0 0 0-.02 3.72l.02 1.7c0 4.5-2.69 8.73-11.52 19.87-3.92 4.95-5.87 7.59-7.55 10.39-2.39 3.97-4.08 10.56-6.56 23.72l-1.05 5.62-.86 4.4zm10.5 0h-1c.03-.34.04-.68.04-1 0-12.39 8.48-33.57 19.16-43.37a26.18 26.18 0 0 0 3.67-4.17 35.8 35.8 0 0 0 2.88-4.9c.36-.72 1.75-3.66 2.1-4.36 3.22-6.29 6.84-6.54 16.97.39 1.34.9 6.07 4.16 6.4 4.38 2.62 1.8 4.67 3.2 6.7 4.56 5.03 3.39 9.37 6.2 13.51 8.7 14.33 8.67 25.49 13.27 34.11 13.27 16.86 0 32.71-5.95 39.6-14.8 1.59-2.04 3.2-5.17 5.06-9.63.8-1.92 1.64-4.06 2.67-6.8l2.74-7.33c4.66-12.44 7.76-19.06 11.56-23.27 7.9-8.79 14.87-36 14.87-52.67 0-1.9.17-3.11 1.02-8.27.37-2.2.58-3.6.74-5.07.63-5.51.21-9.46-1.68-12.39-4.6-7.1-19.7-9.23-38.46-4.78a100.57 100.57 0 0 0-18.94 6.3c-5.17 
2.37-17.11 9.74-16.5 9.4-6.72 3.64-12.97 4.15-24.8 1.3-29.55-7.14-30.43-8.62-15.26-26.81 17.44-20.93 47.12-46.18 56.38-46.18 9.92 0 53.84-11.98 65.78-17.95 9.46-4.73 24.32-21.18 36.82-37.85.71-.95 13.5-21.6 19.2-29.6 9.35-13.13 18.22-22.55 26.95-27.53 7.29-4.17 13.16-10.28 18.8-18.73 1.93-2.9 10.52-17.65 12.73-20.41 1.54-1.93 3-3.21 4.52-3.89 14.07-6.25 24.22-9.04 39.2-9.04h29c4.05 0 7.36-.4 22.93-2.5l4.3-.57c9.92-1.3 16.57-1.93 21.77-1.93 1.66 0 2.95.01 6.03.04 18.61.19 28.55-.48 44.86-4.03 3.1-.67 6.13-1.78 9.11-3.31v1.12a37.96 37.96 0 0 1-8.9 3.17c-16.4 3.56-26.4 4.24-45.08 4.05-3.08-.03-4.36-.04-6.02-.04-5.15 0-11.76.63-21.64 1.92l-4.3.58c-15.64 2.11-18.94 2.5-23.06 2.5h-29c-14.81 0-24.84 2.75-38.8 8.96-1.34.6-2.69 1.78-4.14 3.6-2.16 2.68-10.72 17.39-12.68 20.33-5.72 8.57-11.7 14.8-19.13 19.04-8.57 4.9-17.36 14.23-26.63 27.24-5.68 7.97-18.47 28.64-19.22 29.63-12.6 16.8-27.52 33.32-37.18 38.15-12.06 6.03-56.14 18.05-66.22 18.05-8.82 0-38.39 25.15-55.62 45.82-14.6 17.52-14.19 18.21 14.74 25.2 11.6 2.8 17.6 2.3 24.09-1.2-.67.35 11.31-7.03 16.56-9.44 5.41-2.48 11.6-4.59 19.11-6.37 19.13-4.53 34.65-2.35 39.54 5.22 2.05 3.17 2.48 7.32 1.84 13.04a96.34 96.34 0 0 1-.75 5.13c-.84 5.08-1.01 6.29-1.01 8.1 0 16.9-7.03 44.33-15.13 53.33-3.68 4.09-6.76 10.65-11.37 22.96-.35.93-2.2 5.94-2.73 7.33-1.04 2.76-1.88 4.9-2.68 6.84-1.9 4.53-3.55 7.73-5.2 9.85-7.1 9.13-23.25 15.19-40.39 15.19-8.86 0-20.15-4.65-34.63-13.42-4.15-2.51-8.5-5.32-13.55-8.72a861.54 861.54 0 0 1-6.71-4.56l-6.4-4.39c-9.68-6.63-12.61-6.42-15.5-.75-.35.68-1.74 3.62-2.1 4.35a36.77 36.77 0 0 1-2.96 5.03c-1.12 1.57-2.37 3-3.81 4.33-10.47 9.6-18.84 30.51-18.84 42.63l-.03 1zm-29.65 0h-1.1c1.17-2.52 1.79-5.2 1.79-8 0-20 4.83-42.04 12.15-49.35 5.17-5.18 7.77-8.38 9.9-12.74 2.64-5.41 3.95-12 3.95-20.91 0-6.82 1.14-11.59 3.37-15.07 1.74-2.7 3.6-4.21 8.91-7.52a31.64 31.64 0 0 0 3.9-2.79c4.61-3.96 6.58-6.2 7.72-9.41 1.43-4.02.93-9.04-1.86-16.02a68.98 68.98 0 0 0-3.99-8.07l-.93-1.7a75.47 75.47 0 0 1-2.64-5c-5.16-10.71-3.77-18.9 7.68-29.78a204 204 0 0 1 26.81-21.55c3.96-2.69 16.8-10.8 19.24-12.5 1.99-1.4 4.33-3.3 7.77-6.3-.02 0 7.23-6.39 9.47-8.3 4.97-4.26 9.09-7.5 13.05-10.15 4.72-3.15 8.97-5.28 12.87-6.32 12.78-3.41 15.6-4.18 21.77-5.97 12.55-3.64 21.96-6.9 28.14-10a45.47 45.47 0 0 1 7.47-2.79c8.66-2.66 12.02-4.1 16.97-8.1 6.78-5.46 13.07-14.25 19.33-27.87 15.97-34.77 19.08-39.39 32.15-49.19 3.14-2.36 6.37-4.1 11.43-6.4l2.33-1.04c11.93-5.35 16.87-8.93 21.1-17.38 1.88-3.77 2.48-6.29 3.37-12.27.78-5.19 1.48-7.56 3.53-10.25 2.57-3.4 7.03-6.27 14.36-9.01 3.37-1.26 7.36-2.5 12.05-3.73 16.33-4.3 25.28-5.36 39.6-5.81 6.9-.22 9.5-.56 12.66-2 1.19-.54 2.36-1.23 3.58-2.11 3.7-2.7 8.14-4.54 13.24-5.67 5.71-1.27 10.69-1.54 18.7-1.45l2.35.02c2.82 0 6.8-1 19.7-4.69 10.83-3.08 15.95-4.31 19.3-4.31.82 0 1.9.13 3.55.41l5.01.9c9.82 1.68 17.44 1.89 25.15-.21 7.98-2.18 14.8-6.77 20.29-14.24V147c-5.47 7.04-12.21 11.42-20.03 13.55-7.88 2.15-15.63 1.94-25.58.23l-5-.9c-1.6-.26-2.64-.39-3.39-.39-3.2 0-8.32 1.22-19.74 4.48-12.35 3.53-16.3 4.52-19.26 4.52l-2.36-.02c-7.94-.1-12.85.17-18.47 1.42-4.97 1.11-9.3 2.9-12.88 5.5a21.4 21.4 0 0 1-3.75 2.22c-3.32 1.5-6 1.87-13.04 2.09-14.25.44-23.13 1.5-39.37 5.77a125.56 125.56 0 0 0-11.95 3.7c-7.17 2.7-11.49 5.46-13.93 8.68-1.9 2.52-2.58 4.76-3.33 9.8-.9 6.08-1.53 8.68-3.47 12.56a30.6 30.6 0 0 1-9.66 11.45c-3.12 2.26-5.95 3.73-11.93 6.4l-2.31 1.04c-5.01 2.27-8.18 3.99-11.25 6.29-12.9 9.68-15.93 14.17-31.85 48.8-6.31 13.76-12.7 22.68-19.6 28.25-5.08 4.1-8.53 5.57-17.3 8.27a44.64 44.64 0 0 0-7.33 2.73c-6.24 3.12-15.7 6.4-28.3 
10.06a867.4 867.4 0 0 1-21.8 5.97c-3.77 1.01-7.93 3.1-12.56 6.19a137.35 137.35 0 0 0-12.95 10.07c-2.24 1.92-9.48 8.3-9.48 8.3a98.2 98.2 0 0 1-7.84 6.37c-2.46 1.72-15.32 9.83-19.26 12.5a203 203 0 0 0-26.69 21.45c-11.13 10.58-12.43 18.3-7.47 28.63a74.52 74.52 0 0 0 2.62 4.95l.94 1.7a69.84 69.84 0 0 1 4.03 8.17c2.88 7.2 3.4 12.46 1.89 16.73-1.22 3.43-3.28 5.77-8.02 9.84-1.14.97-2.32 1.8-5.3 3.67-3.92 2.45-5.69 3.89-7.31 6.42-2.13 3.3-3.22 7.89-3.22 14.53 0 9.05-1.34 15.79-4.05 21.34-2.19 4.49-4.85 7.77-10.1 13.01-7.07 7.07-11.85 28.9-11.85 48.65 0 2.8-.58 5.48-1.7 8zm282.54 0h-1.01l-1.1-5.8c-3.08-16.26-4.05-26.2-2.74-37.26.7-5.8.77-9.68.55-15.3-.18-4.45-.17-5.68.19-7.63.78-4.3 3.44-8.53 10.39-16.34 9.07-10.2 12.26-15.41 19.8-30.15 1.35-2.64 2.33-4.47 3.38-6.3.9-1.58 1.82-3.06 2.77-4.5 3.14-4.7 7.03-8.42 16.84-16.81 11.22-9.6 15.5-13.86 18.13-19.13.7-1.4 1.3-2.8 1.93-4.4a206 206 0 0 0 1.49-4.05c3.63-9.94 8.01-13.93 22.9-17.81 4.99-1.3 20.55-5.13 21.38-5.34 16.19-4.1 25.33-7.36 33.48-12.6 5.86-3.77 5.84-3.76 27.66-16.53l2.6-1.52c10.23-6 17.1-10.2 22.73-13.95a149.3 149.3 0 0 0 8.8-6.3 723.7 723.7 0 0 0 6.37-5.08A87.74 87.74 0 0 1 600 342.95v1.12a85.76 85.76 0 0 0-15.49 9.9c.18-.14-4.76 3.84-6.38 5.1a150.3 150.3 0 0 1-8.85 6.35c-5.65 3.76-12.53 7.96-22.78 13.97l-2.6 1.53c-21.8 12.75-21.78 12.74-27.63 16.5-8.27 5.32-17.49 8.61-33.78 12.73-.83.21-16.39 4.04-21.36 5.33-8.03 2.1-13.15 4.5-16.45 7.5-2.66 2.42-4 4.86-5.77 9.7l-1.5 4.07a51.12 51.12 0 0 1-1.96 4.47c-2.72 5.45-7.04 9.75-18.38 19.45-9.73 8.32-13.6 12.02-16.65 16.6a77.18 77.18 0 0 0-2.74 4.45c-1.05 1.81-2.01 3.63-3.35 6.25-7.58 14.81-10.82 20.08-19.96 30.36-6.83 7.7-9.4 11.78-10.15 15.86-.34 1.85-.34 3.04-.17 7.4.22 5.68.14 9.6-.55 15.47-1.3 10.92-.34 20.79 2.73 36.95l1.12 5.99zm-76.59 0h-2.1l1.39-4.3c1.04-3.3 1.93-6.78 2.68-10.4 2.65-12.73 3.27-23.63 3.27-41.3 0-5.71-1.86-9.75-4.13-9.75-2.94 0-6.96 5.61-10.93 17.08C271.14 579.68 258.3 593 238 593c-22.42 0-29.26-1.35-48.42-10.09a87.69 87.69 0 0 1-9.42-5.04c-2.95-1.8-12.78-8.57-14.84-9.72-4.2-2.36-7-2.71-9.72-.99-.63.4-1.26.91-1.9 1.55a57.69 57.69 0 0 1-4.31 3.86 147.88 147.88 0 0 1-3.06 2.44l-1 .8C137.01 582.43 134 587.18 134 597c0 1.02-.02 2.01-.07 3h-2c.05-.99.07-1.98.07-3 0-10.52 3.33-15.78 12.09-22.76a265.61 265.61 0 0 1 2-1.6c.83-.64 1.43-1.13 2.03-1.61a55.76 55.76 0 0 0 4.17-3.74c.74-.73 1.48-1.34 2.24-1.82 3.47-2.2 7-1.75 11.77.93 2.15 1.21 12.03 8 14.9 9.76a85.7 85.7 0 0 0 9.22 4.93C209.29 589.7 215.85 591 238 591c19.25 0 31.49-12.7 41.06-40.33 4.24-12.25 8.66-18.42 12.81-18.42 3.8 0 6.13 5.06 6.13 11.75 0 17.8-.63 28.8-3.3 41.7-.77 3.7-1.68 7.23-2.75 10.6-.4 1.3-.8 2.53-1.19 3.7zm-149.25 0 .5-.94a160.1 160.1 0 0 0 6.53-13.26c2.73-6.29 5.78-9.64 9.24-10.52 3.74-.95 7.15.74 12.56 5.13 5.43 4.4 6.07 4.86 7.73 5.1 1.6.22 4.28 1.14 8.86 2.95 1.3.5 10.78 4.35 13.85 5.55 3.07 1.2 5.85 2.25 8.49 3.18 3.1 1.1 5.98 2.04 8.65 2.81h-3.45c-1.76-.56-3.6-1.18-5.54-1.87a281.2 281.2 0 0 1-8.51-3.19c-3.08-1.2-12.57-5.04-13.86-5.55-4.5-1.78-7.15-2.68-8.63-2.9-1.94-.27-2.53-.7-8.22-5.3-5.17-4.2-8.36-5.78-11.69-4.94-3.1.78-5.94 3.92-8.56 9.95a161 161 0 0 1-6.82 13.8h-1.13zm112.89 0a30.34 30.34 0 0 0 11.27-6.27c1.55-1.36 3.32-3.46 5.34-6.29 1.05-1.46 2.15-3.1 3.41-5.04a349.73 349.73 0 0 0 2.5-3.9l.47-.75.93-1.47a89.17 89.17 0 0 1 3.25-4.86c1.05-1.43 1.82-2.23 2.44-2.46 1.02-.37 1.49.48 1.49 2.04l.01 2.11c.05 6.91-.08 11.32-.7 16.33a48.4 48.4 0 0 1-2.38 10.56h-1.07a46.47 46.47 0 0 0 2.45-10.68c.62-4.96.75-9.33.7-16.2l-.01-2.12c0-.97-.08-1.12-.15-1.1-.36.14-1.05.85-1.97 2.1a88.44 88.44 0 0 0-3.22 
4.82l-.92 1.46-.48.75a1268.1 1268.1 0 0 1-2.5 3.92c-1.26 1.95-2.38 3.6-3.44 5.08-2.06 2.88-3.87 5.04-5.5 6.45a30.87 30.87 0 0 1-8.94 5.52h-2.98zm-183.72 0H69.3c3.37-3.43 5.19-8.33 5.19-15 0-18.6-.04-17.35 1.02-20.77.6-1.93 1.5-3.74 3.27-6.63.42-.7 4.92-7.8 6.78-10.86 3.04-4.97 11.04-16.5 12.21-18.56 3.48-6.08 4.72-12.06 4.72-24.18 0-7.85 2.5-14.2 8.1-23.44l2.84-4.63a72.67 72.67 0 0 0 2.49-4.4c1.62-3.15 2.48-5.78 2.62-8.28.2-3.78-1.3-7.29-4.9-10.9-5.13-5.12-8.6-5.43-11.2-1.85-2.12 2.92-3.48 7.74-5.06 16.47-.2 1.03-.82 4.6-.82 4.57-.83 4.67-1.4 7.33-2.1 9.6-1.35 4.42-3.7 7.61-8.36 12.26l-3.26 3.2c-6.38 6.39-9.68 11.51-11.36 19.5l-1.16 5.52c-.87 4.1-1.56 7.04-2.33 9.94-3.67 13.74-9.65 25.97-22.59 44.72-7.68 11.14-11.05 18.87-10.92 23.72h-1c-.12-5.16 3.35-13.05 11.1-24.28 12.87-18.67 18.8-30.8 22.44-44.42.77-2.88 1.45-5.8 2.32-9.89l1.16-5.51c1.73-8.22 5.13-13.5 11.64-20 .63-.64 2.84-2.8 3.25-3.21 4.57-4.54 6.82-7.62 8.12-11.84a81.58 81.58 0 0 0 2.07-9.48l.81-4.57c1.62-8.9 3-13.8 5.24-16.89 3-4.15 7.2-3.78 12.71 1.74 3.8 3.8 5.42 7.58 5.2 11.66-.15 2.66-1.05 5.41-2.73 8.68a73.6 73.6 0 0 1-2.52 4.46l-2.84 4.63c-5.52 9.1-7.96 15.3-7.96 22.92 0 12.28-1.28 18.43-4.85 24.68-1.2 2.1-9.21 13.65-12.22 18.58-1.87 3.06-6.37 10.18-6.78 10.86-1.73 2.82-2.6 4.57-3.17 6.4-1.02 3.28-.98 2.1-.98 20.48 0 6.52-1.7 11.44-4.82 15zM310.09 0h1.06c-.37.9-.77 1.83-1.2 2.82-3.9 9.06-5.45 15.15-5.45 25.18 0 7.64-2.1 11.6-6.64 13.05-3.46 1.1-5.72.98-17.57-.43-11.55-1.36-19.17-1.58-28.16-.14-6.24 2.49-25.91 7.02-32.13 7.02-11.15 0-36.76-2.88-54.12-7.01a22.08 22.08 0 0 0-16.95 2.48c-4.05 2.33-7.09 5.03-13.9 11.97-6.28 6.39-9.53 9.23-13.8 11.5-7.09 3.79-11.22 7.65-13.4 12.27-1.82 3.85-2.33 7.84-2.33 15.29 0 4.4-2.65 6.69-9.45 9.74.1-.05-2.97 1.31-3.84 1.71-8.78 4.06-12.71 8.29-12.71 16.55 0 12.52-4.86 19.22-17.34 27.96l-4.56 3.14c-1.9 1.3-3.3 2.3-4.67 3.3-.92.68-1.79 1.34-2.62 2-7.16 5.62-11 14.54-15.56 33.28-.63 2.57-3.3 14-4.07 17.14a350.44 350.44 0 0 1-5.2 19.33c-1.37 4.5-4.5 15.07-4.96 16.53-1.05 3.4-1.64 4.94-2.46 6.32-.82 1.4-6.85 9.08-12.64 18.27L0 277.98v-1.9l4.58-7.35a270.8 270.8 0 0 1 12.61-18.23c-.3.5 1.35-2.8 2.38-6.12.45-1.44 3.58-12.01 4.95-16.53 1.83-6.03 3.44-12.09 5.19-19.27.76-3.13 3.44-14.56 4.06-17.14 4.62-18.95 8.52-28.02 15.92-33.83.84-.67 1.72-1.33 2.65-2.01 1.38-1.02 2.8-2.01 4.7-3.32l4.54-3.14C73.83 140.57 78.5 134.13 78.5 122c0-8.74 4.2-13.26 13.29-17.45.88-.41 3.96-1.77 3.85-1.73 6.46-2.9 8.86-4.97 8.86-8.82 0-7.6.53-11.7 2.42-15.71 2.29-4.84 6.57-8.85 13.84-12.73 4.15-2.21 7.35-5 14.15-11.93 6.28-6.4 9.36-9.13 13.52-11.53a23.07 23.07 0 0 1 17.69-2.59c17.27 4.12 42.8 6.99 53.88 6.99 6.1 0 25.73-4.53 31.92-7 9.12-1.46 16.83-1.25 28.49.13 11.63 1.38 13.9 1.5 17.15.47 4.06-1.3 5.94-4.85 5.94-12.1 0-10.1 1.56-16.3 6.6-28zm25.12 0h1c.05 5.62.26 11.48.65 19.4.47 9.7.64 14.57.64 21.6 0 9.81-4.68 17.46-13.1 23.16-6.53 4.43-14.94 7.46-24.33 9.33-3.74.54-9.42.56-22.68.23-6.74-.17-9.35-.22-12.39-.22-2.77 0-4.97.43-7.63 1.36-.88.3-4.55 1.74-5.58 2.11-6.55 2.35-13.59 3.53-24.79 3.53-8.1 0-13.58-1.38-22.46-4.9l-3.18-1.25c-12.55-4.87-21.27-5.15-37.18 1.12-11.15 4.39-18.13 9.2-22.28 14.81-3.15 4.26-4.33 7.8-5.94 15.8-1.22 6.09-1.93 8.74-3.5 12.13-1.65 3.53-3.97 5.81-7.07 7.22-2.33 1.07-4.35 1.5-9.32 2.19-9.04 1.27-12.77 3.09-15.61 9.58-3.71 8.48-7.72 13.87-14.22 19.76-2.4 2.18-13.14 11.02-15.91 13.42-8.2 7.1-13.85 17.37-18.7 31.97a258.81 258.81 0 0 0-3.27 10.7c-.01.05-2.26 7.97-2.88 10.1-8.49 28.85-17.88 52.95-26.13 61.2-2.8 2.8-5.06 5.64-10.4 12.96-3.4 4.68-6.23 8.25-8.95 11.1v-1.55c2.74-2.98 5.73-6.82 
9.48-11.97 4.03-5.52 6.32-8.4 9.17-11.24 8.07-8.08 17.44-32.14 25.87-60.8.62-2.1 2.86-10.03 2.88-10.08 1.21-4.24 2.21-7.53 3.28-10.74 4.9-14.75 10.63-25.16 19-32.4 2.78-2.42 13.5-11.25 15.89-13.4 6.4-5.8 10.32-11.09 13.97-19.43 1.68-3.83 4.05-6.31 7.2-7.86 2.4-1.17 4.64-1.67 9.53-2.36 4.54-.63 6.5-1.05 8.7-2.06 2.89-1.31 5.03-3.42 6.58-6.73 1.53-3.3 2.23-5.9 3.43-11.9 1.64-8.14 2.85-11.79 6.11-16.2 4.28-5.79 11.41-10.7 22.73-15.16 16.15-6.36 25.13-6.07 37.9-1.11l3.19 1.26c8.77 3.47 14.13 4.82 22.09 4.82 11.09 0 18.02-1.16 24.46-3.47 1-.36 4.68-1.8 5.58-2.11A22.5 22.5 0 0 1 265 72.5c3.05 0 5.67.05 14.07.26 11.53.29 17.2.27 20.83-.25 9.25-1.85 17.54-4.83 23.94-9.17C332 57.8 336.5 50.46 336.5 41c0-7-.17-11.86-.7-22.7-.35-7.26-.55-12.83-.59-18.3zM93.87 0h2.04c-.7 4-1.61 6.82-3.03 9.47-2.33 4.38-2.85 5.75-5.26 13.03a40.46 40.46 0 0 1-1.94 5.03c-2.24 4.66-5.92 8.8-13.07 14.26-8.01 6.13-14.27 16.55-20.03 31.55-2.4 6.23-8.75 25.63-9.64 28.01-2.69 7.16-6.56 12.7-15.63 23.68l-2.68 3.24c-6.02 7.34-9.35 12.07-11.72 17.15-2.3 4.94-7.12 9.9-12.91 14.15v-2.4c5.14-3.94 9.1-8.3 11.1-12.6 2.46-5.27 5.87-10.1 11.98-17.56l2.68-3.26c8.94-10.8 12.72-16.22 15.3-23.1.88-2.33 7.24-21.74 9.65-28.03 5.89-15.31 12.3-26 20.68-32.41 6.92-5.3 10.4-9.2 12.48-13.55.65-1.35 1.16-2.7 1.85-4.79 2.45-7.4 3-8.83 5.4-13.34A27.68 27.68 0 0 0 93.87 0zm9.07 0h1.02c-1.66 8.3-2.91 12.67-4.54 15.26a59.14 59.14 0 0 0-4.1 8.21c-1.27 3-2.44 6.2-3.5 9.4-.38 1.12-.7 2.16-2.41 5.39a251.48 251.48 0 0 0-12.81 13.3c-3.48 3.96-5.95 7.27-7.15 9.66-.95 1.9-2.06 5.99-3.61 12.97-.64 2.9-3.65 17.15-4.51 21.07-3.63 16.45-6.63 26.69-9.9 32-7.66 12.45-10.64 15.71-37.08 41.1A69.78 69.78 0 0 1 0 179.21v-1.15a69.39 69.39 0 0 0 13.65-10.42c26.4-25.33 29.32-28.55 36.92-40.9 3.2-5.18 6.18-15.37 9.78-31.7.86-3.91 3.87-18.16 4.51-21.06 1.57-7.09 2.7-11.2 3.7-13.2 1.24-2.5 3.76-5.86 7.29-9.89.9-1.03 1.86-2.1 2.86-3.18 2.4-2.6 4.96-5.22 7.53-7.76.9-.88 1.73-1.7 3.37-3.4a129.02 129.02 0 0 1 4.78-13.46 60.07 60.07 0 0 1 4.19-8.35c1.52-2.44 2.74-6.71 4.36-14.74zM83.71 0h1.1c-2.09 4.74-6.03 8.92-11.42 12.3-7.2 4.52-16.5 7.2-24.39 7.2-8.9 0-11.8 7-11.74 21.52 0 1.7.04 3.17.12 5.99.1 3.3.12 4.45.12 5.99 0 5.73-.76 11.3-2.01 16.5a66.67 66.67 0 0 1-2.15 6.97 2597.76 2597.76 0 0 1-7 15.86 4270.8 4270.8 0 0 1-19.9 43.87A54.64 54.64 0 0 1 0 147v-1.65a54.87 54.87 0 0 0 5.55-9.57A4269.82 4269.82 0 0 0 30.7 79.97c.53-1.2.99-2.23 2.44-5.9A69.23 69.23 0 0 0 36.5 53c0-1.52-.03-2.66-.12-5.95-.08-2.83-.12-4.31-.12-6.01-.03-6.79.53-11.62 2.07-15.34 1.94-4.68 5.39-7.19 10.67-7.19 7.7 0 16.81-2.63 23.86-7.05C77.93 8.27 81.66 4.38 83.7 0zm282.63 0h1.01c1.86 10.02 2.18 12.67 2.32 18.3a123.43 123.43 0 0 1 .37 27.83c-.96 8.78-3.1 16.01-6.63 21.15-11.34 16.5-39.8 29.22-66.41 29.22-5.09 0-10.47.28-16.31.83a413.8 413.8 0 0 0-24.37 3.16c-21.56 3.26-27.66 4.01-36.32 4.01-6.92 0-12.2-1.05-21.69-3.9l-2.78-.83c-1.39-.41-2.54-.74-3.65-1.02-8-2.05-14.22-2.04-21.7.72a16.32 16.32 0 0 0-9.17 8.18c-1.6 3.05-2.5 6.06-4.02 12.83-1.5 6.64-2.34 9.52-3.99 12.64a16.16 16.16 0 0 1-9.85 8.36 104.8 104.8 0 0 0-9.5 3.42c-6.55 2.8-10.1 5.57-13.8 10.47-1.33 1.75-1.03 1.3-5.43 7.9-1.98 2.97-4.66 5.8-8.48 9.14-2.01 1.76-10.71 8.83-12.88 10.7-7.37 6.35-12.58 12.14-16.63 19.14-4.22 7.3-7.8 18.3-11.28 33.26-.87 3.73-1.72 7.64-2.64 12.14l-1.18 5.8-1.09 5.45c-1.8 8.96-2.77 13.28-3.77 16.26-6.8 20.44-17.26 42.16-27.13 51.2-5.11 4.7-8.1 7.07-11.1 8.86-.9.54-1.84 1.04-2.92 1.57-.44.22-9.6 4.4-14.1 6.66l-1.22.62v-1.13l.78-.39c4.52-2.26 13.67-6.44 14.1-6.65a41.19 41.19 0 0 0 2.84-1.54c2.94-1.75 5.88-4.09 10.94-8.73 9.71-8.9 
20.1-30.51 26.87-50.79.97-2.92 1.94-7.22 3.73-16.13l1.1-5.46a490.5 490.5 0 0 1 3.82-17.96c3.5-15.06 7.1-26.14 11.39-33.54 4.11-7.11 9.4-12.98 16.83-19.4 2.19-1.88 10.88-8.95 12.88-10.7 3.77-3.28 6.39-6.05 8.3-8.93 4.43-6.64 4.12-6.18 5.47-7.96 3.8-5.03 7.5-7.91 14.21-10.78 2.61-1.12 5.74-2.24 9.59-3.46a15.17 15.17 0 0 0 9.27-7.86c1.59-3.02 2.42-5.85 4.03-12.99 1.41-6.27 2.32-9.33 3.98-12.48a17.31 17.31 0 0 1 9.7-8.66c7.7-2.83 14.1-2.84 22.3-.75 1.12.29 2.28.61 3.68 1.03l3.73 1.11c8.47 2.54 13.66 3.58 20.46 3.58 8.59 0 14.67-.75 36.18-4a414.64 414.64 0 0 1 24.41-3.17c5.88-.54 11.29-.83 16.41-.83 26.3 0 54.45-12.58 65.59-28.78 3.42-4.98 5.5-12.06 6.46-20.7.84-7.74.73-16.02.02-23.9a136.2 136.2 0 0 0-.57-5.12c0-4.47-.3-6.94-2.16-17zM18.88 0h1.03C18 7.57 17.15 10.18 14.46 16.2c-1.95 4.37-2.67 9.19-2.42 14.89.2 4.33.71 7.7 2.28 16.13 1.09 5.88 1.57 8.77 1.94 12.2.96 8.9.24 16.08-2.8 22.79A463.4 463.4 0 0 1 0 109.43v-2.12a465 465 0 0 0 12.54-25.52c2.97-6.52 3.67-13.53 2.72-22.27-.36-3.4-.84-6.26-1.93-12.12-1.57-8.47-2.1-11.88-2.29-16.27-.26-5.84.48-10.81 2.5-15.33 2.64-5.9 3.48-8.47 5.34-15.8zm280.47 0a70.78 70.78 0 0 1-4.91 11.24c-2.56 4.7-4.01 8.45-4.86 11.98l-.4 1.8-.28 1.45a5.28 5.28 0 0 1-.74 2.07c-.74 1.03-1.93 1.28-5.13 1.25.92 0-9.85-.29-15.03-.29-10.2 0-18.45.82-29.46 2.56-16.87 2.66-17.73 2.77-23.66 2.52a42.57 42.57 0 0 1-8-1.09c-17.7-4.16-46.18-5.86-54.72-3.01-2.72.9-5.88 2.8-9.52 5.59a112.37 112.37 0 0 0-6.54 5.48c-1.4 1.25-9.17 8.5-10.78 9.84-1.45 1.2-8.18 7.42-8.85 8.02a114.65 114.65 0 0 1-4.55 3.9c-4.99 4.03-8.9 6.2-11.92 6.2-3.52.05-4.32 0-5.14-.4-1.13-.56-1.5-1.72-1.13-3.57.74-3.63 4.47-10.84 12.84-24.8 5.69-9.48 9.42-18 11.78-26.2 1.45-5.04 1.94-7.4 2.97-14.54h1.01c-1.05 7.3-1.54 9.7-3.01 14.82-2.39 8.28-6.16 16.89-11.9 26.44-8.3 13.84-12 21.01-12.7 24.48-.3 1.45-.08 2.14.59 2.47.6.3 1.35.35 3.48.3 3.92 0 7.69-2.1 12.5-5.98a114.6 114.6 0 0 0 4.51-3.86c.66-.59 7.41-6.83 8.88-8.05 1.59-1.33 9.34-8.55 10.75-9.82 2.4-2.15 4.55-3.96 6.6-5.53 3.72-2.85 6.97-4.8 9.81-5.74 8.76-2.92 37.41-1.22 55.27 2.99 2.57.6 5.14.95 7.81 1.06 5.84.25 6.7.14 23.47-2.51 11.05-1.75 19.36-2.57 29.6-2.57 5.2 0 15.99.3 15.05.29 2.87.03 3.84-.17 4.3-.83.23-.32.4-.8.58-1.7l.28-1.43.4-1.85c.88-3.6 2.36-7.44 4.96-12.22A69.5 69.5 0 0 0 298.29 0h1.06zm-8.59 0c-5.91 17.94-9.55 22-19.76 22-4.5 0-10.22.32-28.69 1.5l-1.53.1c-15.6.99-23.47 1.4-28.78 1.4-5.35 0-13.24-.96-28.86-3.28l-1.54-.23C163.18 18.75 157.47 18 153 18c-4.45 0-7.3 1.01-10.96 3.34-.1.06-1.8 1.17-2.3 1.47-2.43 1.5-4.32 2.19-6.74 2.19-2.8 0-4.11-1.46-4.11-4.22 0-1.04.16-2.29.5-4.1.16-.82.9-4.4 1.07-5.32.8-4.11 1.3-7.68 1.47-11.36h2c-.17 3.82-.68 7.5-1.5 11.75-.19.94-.92 4.5-1.07 5.31a21.04 21.04 0 0 0-.47 3.72c0 1.7.46 2.22 2.11 2.22 1.99 0 3.55-.57 5.7-1.9.47-.28 2.15-1.37 2.26-1.44C144.92 17.14 148.12 16 153 16c4.62 0 10.3.74 28.9 3.51l1.53.23C198.93 22.04 206.8 23 212 23c5.25 0 13.11-.41 28.65-1.4l1.54-.1C260.73 20.32 266.43 20 271 20c8.95 0 12.15-3.4 17.66-20h2.1zM141.51 0h1.13c-2.06 3.86-2.63 5.1-2.77 6.19-.15 1.12.42 1.64 2.32 1.96 1.8.3 3.85.35 10.81.35 6.02 0 13 .56 21.35 1.62 3.95.5 8.03 1.1 13.13 1.89 24 3.7 22.5 3.49 26.83 3.49 24.02 0 51.83-2.24 60.45-6.94 2.88-1.57 5.05-4.49 6.6-8.56h1.07c-1.64 4.47-3.98 7.69-7.2 9.44-8.83 4.82-36.67 7.06-60.92 7.06-4.41 0-2.84.22-26.98-3.5-5.1-.8-9.17-1.38-13.1-1.88-8.31-1.06-15.26-1.62-21.23-1.62-7.04 0-9.1-.05-10.97-.37-2.38-.4-3.38-1.32-3.15-3.07.16-1.22.69-2.41 2.63-6.06zm76.4 0c5.69 1.64 10.37 2.5 14.09 2.5 9.59 0 16.7-.71 22.4-2.5h2.98C251.12 2.53 243.2 3.5 232 3.5c-4.5 
0-10.32-1.21-17.53-3.5h3.45zM70.69 0c-2.87 3.27-6.95 5.39-12.02 6.53-3.98.89-7.5 1.08-12.92 1A97.24 97.24 0 0 0 44 7.5c-5.37 0-8.86-1.24-10.1-4.97A8.6 8.6 0 0 1 33.5 0h.99c.02.82.14 1.56.36 2.22C35.91 5.39 39.02 6.5 44 6.5l1.76.02c5.35.09 8.8-.1 12.69-.97C62.95 4.54 66.63 2.74 69.3 0h1.37zM0 207.87c7.31-.16 11.5 3.33 11.5 11.13 0 11.41-5.05 28.35-11.5 41.5v-2.3c5.93-12.72 10.5-28.47 10.5-39.2 0-7.18-3.7-10.3-10.5-10.13v-1zm0 7.05c1.23.14 2.18.58 2.87 1.31 1.4 1.48 1.6 3.72 1.16 7.58l-.16 1.3A28.93 28.93 0 0 0 3.5 229c0 3.2-1.48 9.52-3.5 15.9v-3.45c1.49-5.13 2.5-9.87 2.5-12.45 0-.98.08-1.75.37-4.02l.16-1.29c.42-3.56.24-5.59-.88-6.77-.5-.53-1.21-.87-2.15-1v-1zM0 410.9v-1.47a21.67 21.67 0 0 0 2.97-4.7c1.32-2.7 2.68-6.28 4.56-11.89 7.85-23.55 7.83-26.6.25-30.4-2.25-1.12-4.8-1.43-7.78-.91v-1.02a13.1 13.1 0 0 1 8.22 1.04c8.24 4.12 8.26 7.6.25 31.6-1.88 5.66-3.25 9.27-4.6 12.02A20.82 20.82 0 0 1 0 410.9zM33.64 452c1.68 0 3.04-.23 8.34-1.31l2.38-.47c8.26-1.57 12.72-1.3 14.53 2.33 1.38 2.75-.47 5.86-4.75 9.68a75.6 75.6 0 0 1-5.08 4.07c-.94.7-4.89 3.59-5.79 4.27-1.86 1.4-2.97 2.37-3.47 3.03a19.08 19.08 0 0 0-2.89 5.5c.07-.2-4.02 13.65-6.96 22.22-2.7 7.85-5.56 10.72-8.82 8.59-2.11-1.4-3.66-4.24-6.6-11.03-1.98-4.62-2.5-5.76-3.4-7.4-4.55-8.18-3.9-23.9-.05-32.87a9.6 9.6 0 0 1 6.98-5.96c2.59-.66 4.86-.75 11.78-.67l3.8.02zm0 2c-1.13 0-2.09 0-3.82-.02-12.07-.13-14.83.57-16.9 5.41-3.63 8.47-4.26 23.55-.05 31.12.96 1.73 1.48 2.88 3.5 7.58 2.72 6.3 4.24 9.08 5.86 10.14 1.64 1.08 3.5-.8 5.82-7.55a682.9 682.9 0 0 0 6.97-22.24 21.03 21.03 0 0 1 3.18-6.04c.65-.87 1.85-1.9 3.86-3.43.92-.7 4.87-3.57 5.8-4.27 2.02-1.5 3.6-2.77 4.95-3.97 3.63-3.23 5.09-5.7 4.3-7.28-1.21-2.42-5.07-2.65-12.38-1.27l-2.35.47c-5.49 1.11-6.86 1.35-8.74 1.35zm345.63 146c-3.45-12.26-3.77-14.13-3.77-19 0-3.33-.13-6.27-.43-11.34-.63-10.33-.65-13.5.26-17.07 1.21-4.74 4.21-7.1 9.67-7.1h26c4.08 0 5.19 1.85 5.93 7.11.1.79.13.97.19 1.32.84 5.35 2.8 7.58 8.88 7.58 3.64 0 5.54.4 6.43 1.37.76.83.76 1.44.36 3.93-.85 5.26.5 8.85 7.5 13.8 6.32 4.45 11.63 5.36 16.55 3.37 3.8-1.54 6.73-4.16 11.92-10l1.1-1.23 1.09-1.23a75.6 75.6 0 0 1 2.7-2.86 35.81 35.81 0 0 1 9.57-6.73c1.52-.76 1.72-.86 5.66-2.63 6.1-2.73 9.01-4.5 11.74-7.62 2.63-3 4.67-4.85 6.7-6.04 3.18-1.85 5.46-2.13 13.68-2.13 5.98 0 10.56-4.32 18-14.99l2.82-4.03c1.06-1.5 1.94-2.7 2.79-3.79 7.87-10.12 19.38-10.4 30.74.96 5.54 5.53 10.17 19.43 13.64 38.51 2.5 13.75 4.18 29.46 4.47 39.84h-1c-.3-10.32-1.96-25.97-4.45-39.66-3.43-18.87-8.02-32.65-13.36-37.99-10.95-10.95-21.76-10.68-29.26-1.04-.83 1.07-1.7 2.26-2.75 3.75l-2.81 4.02c-7.65 10.95-12.38 15.42-18.83 15.42-8.04 0-10.21.26-13.17 2-1.92 1.12-3.9 2.9-6.45 5.83-2.86 3.26-5.87 5.09-12.09 7.88a103.35 103.35 0 0 0-5.62 2.6 34.84 34.84 0 0 0-9.32 6.54 74.67 74.67 0 0 0-3.75 4.05l-1.1 1.24c-5.28 5.95-8.29 8.64-12.28 10.25-5.26 2.13-10.92 1.17-17.5-3.48-7.33-5.17-8.82-9.15-7.92-14.77.34-2.12.34-2.6-.1-3.1-.64-.69-2.34-1.04-5.7-1.04-6.63 0-8.96-2.63-9.87-8.42l-.2-1.34c-.67-4.82-1.53-6.24-4.93-6.24h-26c-5 0-7.6 2.04-8.7 6.34-.88 3.43-.85 6.57-.23 16.76a177 177 0 0 1 .43 11.4c0 4.78.32 6.63 3.81 19h-1.04zm13.68 0c-1.31-6.58-1.61-10.71-1.36-14.84.04-.7.1-1.44.18-2.38l.23-2.56c.34-3.81.5-6.97.5-11.22 0-4.94 1.46-7.76 4.21-8.42 2.38-.58 5.56.54 9.2 3 6.64 4.52 13.99 13.07 16.55 19.23 4.77 11.44 14.12 15.69 33.54 15.69 8.6 0 14.32-2.35 20.67-7.88 1.45-1.26 15.06-15 21-20 7.21-6.07 11.77-7.59 20.62-8.32 5.52-.45 7.98-.9 11.44-2.36 4.58-1.95 9.36-5.48 14.9-11.29 7.43-7.76 13.25-8.92 17.47-4.3 3.32 3.63 5.46 10.58 6.82 20.24.73 5.17.94 7.74 1.58 17.38.25 3.75.17 
5.32-.92 18.03h-1c1.09-12.7 1.17-14.28.92-17.97-.64-9.6-.85-12.16-1.57-17.3-1.33-9.47-3.43-16.27-6.56-19.7-3.76-4.11-8.93-3.08-16 4.32-5.65 5.9-10.54 9.5-15.25 11.5-3.58 1.53-6.13 1.99-11.6 2.44-8.8.72-13.17 2.18-20.2 8.1-5.9 4.96-19.5 18.7-21 19.99-6.52 5.68-12.47 8.12-21.32 8.12-19.78 0-29.5-4.42-34.46-16.3-2.49-5.97-9.71-14.38-16.2-18.79-3.42-2.32-6.36-3.35-8.4-2.86-2.2.53-3.44 2.92-3.44 7.45 0 4.28-.16 7.47-.5 11.31l-.23 2.56c-.09.93-.14 1.65-.19 2.35-.24 4.08.06 8.18 1.39 14.78h-1.02zm113.75 0c2.52-3.26 8.93-11.79 10.9-14.3 5.48-6.98 13.05-12.38 19.4-13.94 7.01-1.71 11.5 1.45 11.5 9.24 0 4.02-.04 5.16-.74 19h-1c.7-13.85.74-15 .74-19 0-7.12-3.86-9.83-10.26-8.26-6.11 1.5-13.5 6.77-18.85 13.57-1.86 2.36-7.65 10.07-10.43 13.69h-1.26zm-9.86-338.96c3.44 2.71 7 5.1 11.44 7.75 1.06.64 8.42 4.9 10.35 6.1 11.27 7 15 13.35 12.35 25.33-1.45 6.52-4.53 11.1-9.39 14.44-3.83 2.63-8.07 4.26-16.08 6.56-11.97 3.45-13.68 3.99-18.82 6.28a60.18 60.18 0 0 0-7.81 4.18c-11.11 7.07-19.1 7.7-27.96 3.28-3.56-1.77-17.2-11-17.2-11.01a101.77 101.77 0 0 0-5.2-3.07c-16.04-8.83-34.27-24.16-34.52-31.85-.11-3.46 1.99-6.57 6.28-10.26 1.03-.9 2.18-1.81 3.68-2.95.72-.55 3.38-2.56 3.94-3 4.47-3.4 7.18-5.79 9.32-8.45 11.12-13.82 26.55-28.68 34.36-32.28 12.06-5.54 19.84-5.77 27.37.12 3.25 2.54 5.65 6.54 8.58 13.35.29.65 2.3 5.45 2.88 6.74 1.62 3.65 2.9 5.8 4.24 6.94.72.6 1.45 1.2 2.2 1.8zm-3.49-.28c-1.63-1.39-3.03-3.74-4.77-7.65-.58-1.3-2.6-6.12-2.88-6.76-2.81-6.5-5.08-10.3-7.98-12.56-6.83-5.35-13.85-5.15-25.3.12-7.45 3.42-22.7 18.12-33.64 31.72-2.27 2.82-5.08 5.3-9.67 8.79l-3.94 2.98a79.98 79.98 0 0 0-3.59 2.88c-3.87 3.33-5.67 6-5.58 8.69.21 6.64 18.14 21.72 33.48 30.15 1.76.97 3.5 2 5.3 3.13.12.08 13.61 9.22 17.03 10.92 8.22 4.1 15.46 3.52 26-3.18a62.17 62.17 0 0 1 8.07-4.31c5.25-2.35 7-2.9 19.08-6.38 7.8-2.24 11.9-3.82 15.5-6.3 4.44-3.04 7.23-7.18 8.56-13.22 2.44-11.02-.83-16.6-11.45-23.2-1.9-1.18-9.23-5.42-10.32-6.08-4.5-2.69-8.13-5.12-11.64-7.9-.77-.6-1.52-1.21-2.26-1.84zM87.72 241.6c4.3-2.98 7.88-5 12.14-6.95.84-.4 1.73-.78 2.78-1.24l4.37-1.88a164.3 164.3 0 0 0 17.74-8.96 320.67 320.67 0 0 1 27.87-14.5c4.22-1.95 21.89-9.84 21.17-9.52 19.17-8.62 28.1-6.93 49.5 8.05 7.91 5.54 13.24 13.25 16.45 22.66 3.02 8.83 3.76 16.51 3.76 27.75 0 8.32-.66 12.95-3.68 18.97-4.18 8.36-12.3 16.14-25.58 23.47-24.45 13.49-38.83 27.55-52.83 47.84-8.83 12.8-47.76 44.21-65.16 54.15C75.04 413.55 48.89 423.5 31 423.5c-10.05 0-14.67-4.78-14.76-13.37-.07-6.32 2.06-13.73 6.3-24.32 2.95-7.37 2.02-12.9-2.16-22.29-3.19-7.17-3.88-9.14-3.88-12.52 0-3.35 1.87-6.9 5.52-11.07 2.61-3 3.5-3.83 11.9-11.5 5.09-4.66 8.08-7.6 10.7-10.75 9.46-11.36 12.62-19.47 17.9-44.78 3.12-15.05 6.63-20.28 15.12-25.25.8-.47 3.95-2.25 4.7-2.68a76.66 76.66 0 0 0 5.38-3.38zm.56.82a77.63 77.63 0 0 1-5.44 3.43l-4.7 2.67c-8.23 4.82-11.57 9.81-14.65 24.6-5.3 25.45-8.51 33.7-18.1 45.21-2.66 3.19-5.68 6.16-10.8 10.84-8.36 7.64-9.24 8.48-11.82 11.42-3.5 4.01-5.27 7.36-5.27 10.42 0 3.18.68 5.1 3.8 12.12 4.27 9.6 5.24 15.37 2.16 23.07-4.18 10.47-6.29 17.78-6.22 23.93.08 8.06 4.26 12.38 13.76 12.38 17.67 0 43.68-9.9 64.75-21.93 17.28-9.88 56.1-41.2 64.84-53.85 14.08-20.42 28.57-34.59 53.17-48.16 13.12-7.23 21.09-14.87 25.17-23.03 2.92-5.86 3.57-10.35 3.57-18.53 0-11.13-.74-18.73-3.7-27.43-3.15-9.22-8.36-16.75-16.09-22.16-21.13-14.8-29.7-16.42-48.5-7.95.7-.32-16.96 7.56-21.17 9.5-1.7.8-3.3 1.55-4.86 2.3a319.68 319.68 0 0 0-22.93 12.17 165.3 165.3 0 0 1-17.85 9.01l-4.37 1.88c-1.04.45-1.92.84-2.76 1.23a74.56 74.56 0 0 0-11.99 6.86zm-7.6 12.2c7.7-6.25 12.3-8.17 23.68-11.27 6.12-1.67 9.12-2.95 
12.31-5.72 3.8-3.3 7.47-4.52 15.86-6.1 2.75-.52 3.67-.7 5.06-1.02 5.48-1.24 9.48-2.93 13.1-5.89 10.42-8.53 25.4-14.11 36.31-14.11 5.33 0 16.77 7.58 25.74 17.16 10.73 11.46 15.96 23.27 12.73 32.5-3.18 9.1-11.39 18.57-23.03 27.86-8.44 6.73-18.36 13-25.22 16.43-3.72 1.86-6.59 4.88-9.77 9.99-.69 1.1-11.1 20.25-16.03 27.83-5.62 8.65-15.4 17.36-30.23 27.96a552.58 552.58 0 0 1-9.2 6.42c-.13.09-6.81 4.65-8.6 5.89-6.47 4.46-10.35 7.35-13.05 9.83-11.64 10.67-37.14 15.54-43.7 8.98-1.96-1.96-2.2-4.06-1.95-10.52.37-9.42-.5-14.5-4.95-20.51a34.09 34.09 0 0 0-7.04-6.92c-3.93-2.95-6.07-6.11-6.56-9.49-.97-6.61 3.87-13.06 14.17-21.69 1.58-1.32 6.67-5.44 7.09-5.78a48.03 48.03 0 0 0 5.23-4.77c4.1-4.63 5.85-9.55 7.8-20.07a501.52 501.52 0 0 0 .8-4.37c.33-1.87.6-3.3.88-4.73.74-3.78 1.5-7.18 2.4-10.63 1-3.78 1.38-5.5 2.36-10.37.6-3.02.93-4.21 1.56-5.47 1.22-2.45 1.27-2.5 12.25-11.42zm.64.78c-10.77 8.74-10.88 8.84-12 11.08-.58 1.16-.88 2.3-1.47 5.22-.98 4.89-1.36 6.63-2.37 10.44-.9 3.43-1.65 6.8-2.39 10.56a339.79 339.79 0 0 0-1.29 6.95l-.39 2.15c-1.98 10.68-3.77 15.74-8.04 20.54a48.77 48.77 0 0 1-5.34 4.88c-.42.34-5.5 4.47-7.07 5.78-10.04 8.4-14.72 14.65-13.83 20.78.45 3.1 2.44 6.03 6.17 8.83 3 2.25 5.39 4.62 7.24 7.12 4.63 6.24 5.52 11.52 5.15 21.15-.25 6.14-.01 8.1 1.66 9.78 6.1 6.1 31.02 1.33 42.31-9.02 2.75-2.52 6.66-5.43 13.16-9.92l8.6-5.89c3.63-2.48 6.45-4.44 9.19-6.4 14.73-10.54 24.44-19.18 29.97-27.7 4.9-7.54 15.31-26.68 16.02-27.8 3.27-5.26 6.26-8.41 10.18-10.37 6.79-3.4 16.65-9.63 25.03-16.32 11.52-9.18 19.61-18.53 22.72-27.4 3.07-8.78-2.02-20.27-12.52-31.49-8.8-9.4-20.04-16.84-25.01-16.84-10.67 0-25.43 5.5-35.68 13.89-3.76 3.07-7.9 4.81-13.5 6.09-1.41.32-2.35.5-5.11 1.02-8.21 1.55-11.76 2.73-15.38 5.88-3.34 2.9-6.45 4.22-12.7 5.92-11.26 3.07-15.75 4.94-23.31 11.09zM212 251.85c0 7.56-.6 10.92-2.6 14.3-1.1 1.84-7.66 10.05-8.6 11.3-5.96 7.94-9.33 10.28-17.26 13.76-1.34.58-2.2 1-3.03 1.5-.55.33-1.2.66-2 1.02-.71.33-4.46 1.9-5.52 2.39-6.05 2.78-8.99 5.8-8.99 10.73 0 10.97-18.95 36.12-34.51 44.87-8.18 4.6-21.3 9.36-32.78 11.86-13.33 2.9-22.49 2.48-24.62-2.32-1.32-2.97-4.4-4.26-11.98-5.81l-.6-.12c-4.84-.99-6.94-1.55-9.03-2.64-2.92-1.5-4.48-3.7-4.48-6.84 0-2.74 1.08-5.77 3.25-9.67.85-1.53 1.82-3.13 3.23-5.35-.16.25 2.83-4.4 3.67-5.76 6.69-10.7 9.85-18.5 9.85-27.22 0-18.41 11.22-33.37 27.5-42.86 5.22-3.05 9.23-3.31 15.2-2.12 5.04 1 6.05.9 7.43-1.52 4.5-7.85 7.04-9.5 15.87-9.5 3.93 0 6.97-.98 10.47-3.16 1.56-.97 8.67-6.17 10.99-7.68 9.2-5.98 11.34-7 25.2-11.95 6.95-2.48 15.18 1.28 22.33 9.12 6.55 7.19 11.01 16.61 11.01 23.67zm-2 0c0-6.5-4.25-15.48-10.49-22.32-6.67-7.32-14.16-10.74-20.17-8.59-13.73 4.9-15.73 5.85-24.8 11.75-2.24 1.46-9.37 6.68-11.01 7.7-3.8 2.36-7.2 3.46-11.53 3.46-8.08 0-9.98 1.23-14.13 8.5-1.1 1.91-2.51 2.88-4.35 3.09-1.3.14-1.9.05-5.22-.61-5.53-1.1-9.07-.88-13.8 1.88-15.72 9.17-26.5 23.55-26.5 41.14 0 9.2-3.28 17.29-10.15 28.28l-3.68 5.77c-1.39 2.19-2.35 3.77-3.17 5.25-2.02 3.63-3 6.38-3 8.7 0 4.19 2.87 5.67 11.9 7.52l.61.12c8.27 1.7 11.7 3.13 13.4 6.95 3.17 7.14 36 0 54.6-10.46 14.98-8.43 33.49-32.99 33.49-43.13 0-5.9 3.47-9.48 10.16-12.55 1.1-.5 4.85-2.08 5.52-2.38.74-.34 1.32-.64 1.8-.93.92-.55 1.85-1 3.25-1.62 7.65-3.35 10.75-5.5 16.47-13.12 1.02-1.36 7.47-9.42 8.47-11.11 1.79-3.01 2.33-6.06 2.33-13.3zm-37.18-22.4c.15-.1 2.4-1.51 2.95-1.84.96-.57 1.7-.94 2.43-1.17 2.57-.83 5.06-.1 11.04 3.12 14.86 8 19.43 22.87 9.18 38.71-4.04 6.24-9.37 9-18.72 11.11-.85.2-1.2.27-3.13.68-6.04 1.29-8.78 2.08-11.6 3.65-3.63 2.02-6.09 4.98-7.5 9.44-7.87 24.93-19.72 43.34-36.28 50.31-16.45 6.93-21.13 8.53-27.98 
8.89-4.94.25-9.8-.65-15.4-2.89a44.45 44.45 0 0 1-5.64-2.6c-4.02-2.33-5.14-4.74-4.5-9.31.3-2.13 3.77-15.53 4.84-20.65.63-3.05 1.19-6.14 1.75-9.69a464.04 464.04 0 0 0 1.35-8.9c1.42-9.41 2.5-14.27 4.49-18.65 2.46-5.43 6.13-9.03 11.72-11.13 6.59-2.47 10.54-3.1 18.03-3.53 4.75-.27 6.68-.64 9-2.05.61-.37 1.22-.81 1.82-1.33a30.61 30.61 0 0 0 3.37-3.4c.59-.69 2.38-2.9 2.63-3.19 3.36-4 6.3-5.53 12.33-5.53 3.94 0 5.9-.92 8.18-3.36-.17.18 2.75-3.14 3.85-4.22a30.95 30.95 0 0 1 6.79-5c1.5-.83 3.15-1.62 4.99-2.38a64.92 64.92 0 0 0 10.01-5.1zm-14.52 8.34a29.95 29.95 0 0 0-6.57 4.84 116.68 116.68 0 0 0-3.82 4.2c-2.46 2.63-4.68 3.67-8.91 3.67-5.72 0-8.39 1.39-11.57 5.17-.23.28-2.03 2.5-2.63 3.2a31.6 31.6 0 0 1-3.47 3.51c-.65.55-1.3 1.03-1.96 1.43-2.5 1.51-4.55 1.9-9.47 2.19-7.39.42-11.25 1.04-17.72 3.47-5.34 2-8.82 5.4-11.17 10.6-1.93 4.27-3 9.07-4.41 18.39l-.65 4.34-.7 4.57c-.57 3.56-1.12 6.67-1.76 9.73-1.08 5.18-4.54 18.53-4.83 20.59-.59 4.17.35 6.18 4.01 8.3 1.35.77 3.1 1.58 5.52 2.55 5.46 2.18 10.18 3.05 14.97 2.8 6.69-.34 11.32-1.93 27.65-8.8 16.21-6.83 27.92-25.01 35.71-49.7 1.49-4.7 4.12-7.86 7.97-10 2.93-1.63 5.74-2.45 11.87-3.76 1.92-.4 2.28-.49 3.12-.68 9.12-2.06 14.24-4.7 18.1-10.67 9.92-15.34 5.55-29.55-8.82-37.29-5.75-3.1-8.03-3.76-10.25-3.05-.65.2-1.33.54-2.23 1.08-.55.32-2.77 1.72-2.93 1.82a65.91 65.91 0 0 1-10.16 5.17c-1.8.75-3.42 1.52-4.89 2.33zm-42.39 32.72c16.15-2.87 26.36-.97 32.47 6.16 5.08 5.93 1.13 21.42-5.93 35.55-4.79 9.58-10.6 16.21-23.16 25.19-14.15 10.1-35.5 12.2-40.71 3.85-1.86-2.97-2.1-8.14-1.06-15.73.78-5.68 1.86-10.71 4.73-22.98l.12-.51c1.59-6.8 2.37-10.31 3.14-14.14 1.45-7.25 3.74-11.47 7.26-13.74 2.81-1.8 5.53-2.28 12.33-2.62 5.33-.27 7.56-.46 10.81-1.03zm.18.98c-3.3.59-5.56.78-10.94 1.05-6.62.33-9.23.78-11.84 2.46-3.25 2.1-5.42 6.09-6.82 13.1-.77 3.84-1.56 7.35-3.15 14.17l-.12.5c-2.86 12.24-3.93 17.26-4.7 22.9-1.03 7.36-.79 12.36.9 15.07 4.82 7.7 25.54 5.67 39.29-4.15 12.43-8.88 18.13-15.39 22.84-24.81 6.86-13.72 10.75-29 6.07-34.45-5.84-6.81-15.7-8.65-31.53-5.84zM132 276.5c7.12 0 10.66 3.08 11.25 8.7.42 4.02-.43 8.14-2.77 15.94-2.56 8.52-18.36 25.38-27.2 31.28-7.01 4.67-20.02 5.67-26.57.99-3.99-2.85-3.53-12.08.02-26.46.68-2.75 1.47-5.65 2.37-8.76a412.6 412.6 0 0 1 3.05-10.14l.37-1.2c1.48-4.8 5.1-7.75 10.73-9.27 4.4-1.2 9.54-1.5 17.48-1.33l3.89.1c3.87.11 5.42.15 7.38.15zm0 1c-1.97 0-3.53-.04-7.41-.15l-3.88-.1c-7.85-.17-12.92.13-17.2 1.3-5.32 1.43-8.67 4.16-10.03 8.6a1277.83 1277.83 0 0 1-1.6 5.21c-.68 2.2-1.27 4.17-1.82 6.1-.9 3.1-1.68 5.99-2.36 8.73-3.43 13.88-3.87 22.93-.4 25.4 6.17 4.42 18.73 3.45 25.42-1 8.66-5.78 24.33-22.49 26.8-30.73 2.3-7.67 3.14-11.71 2.73-15.56-.53-5.1-3.64-7.8-10.25-7.8zm-17.79 7a31.3 31.3 0 0 1 8.57 1.4c5.42 1.78 8.72 5.03 8.72 10.1 0 9.59-9.51 17.2-22.34 21.47-9.82 3.28-13.62-1.79-11.66-16.54.84-6.28 3.82-10.67 8.24-13.46a20.38 20.38 0 0 1 8.47-2.97zm-.6 1.08a19.39 19.39 0 0 0-7.34 2.73c-4.18 2.64-6.98 6.78-7.77 12.76-1.89 14.11 1.36 18.45 10.34 15.46C121.3 312.37 130.5 305 130.5 296c0-4.56-2.98-7.5-8.03-9.15a28.05 28.05 0 0 0-8.2-1.35c-.13 0-.35.03-.66.08zm80.87-23.45c-2.72 9.8-14.93 9.86-26.72 3.3-10.17-5.64-13.8-17.98-5-22.87a66.53 66.53 0 0 0 4.48-2.7l2.03-1.3a50.15 50.15 0 0 1 3.92-2.3c4.73-2.43 8.82-2.8 14-.72 9.16 3.66 10.98 13.33 7.3 26.6zm-20.83-24.98a49.26 49.26 0 0 0-3.84 2.25l-2.03 1.3c-.84.53-1.5.95-2.16 1.35-.82.5-1.6.96-2.38 1.39-7.94 4.4-4.59 15.8 5 21.12 11.31 6.29 22.8 6.23 25.28-2.7 3.57-12.83 1.85-21.97-6.7-25.4-4.9-1.95-8.69-1.62-13.17.7zm17.85 12.15c0 5.7-2.44 9-6.64 
9.96-3.3.76-7.56-.05-11.08-1.81l-1.89-.94c-.67-.34-1.18-.62-1.63-.88-4.07-2.38-4.13-4.97.34-10.93 6.8-9.06 20.9-7.16 20.9 4.6zm-1 0c0-5.3-2.87-8.55-7.32-9.16-4.23-.57-8.99 1.44-11.78 5.16-4.15 5.54-4.1 7.44-.64 9.47.44.25.93.51 1.59.85l1.87.93c3.34 1.67 7.36 2.44 10.42 1.74 3.73-.86 5.86-3.74 5.86-9zm196.5 281c0-12.8 2.44-16.74 18.48-29.77a56.8 56.8 0 0 1 7.61-5.2c2.6-1.5 5.33-2.82 8.5-4.18 1.24-.53 2.48-1.05 4.1-1.7l3.92-1.57c9.4-3.83 13.74-6.7 16.62-12.05 1.2-2.22 2.21-4.4 3.23-6.83a148.57 148.57 0 0 0 1.54-3.84l.3-.74.56-1.44c3.2-8.02 6.05-12.08 12.7-16.5a35.26 35.26 0 0 0 4.96-4 46.36 46.36 0 0 0 3.88-4.29c.27-.34 2.55-3.2 3.2-3.98 3.48-4.15 6.51-5.9 11.51-5.9 3.08 0 5.62-.63 9.57-2.1 5.42-2.02 6.53-2.34 8.96-2.2 2.53.13 4.85 1.26 7.18 3.59 1.3 1.3 5.55 5.83 6.52 6.78 5.06 5 9.44 6.92 17.77 6.92a197.5 197.5 0 0 1 12.08.45c15.93.87 21.94.57 25.28-2.21 6.91-5.77 11.64-2.73 11.64 7.76 0 10.73-8.6 20-19 20-4.8 0-8.32 1.43-9.34 3.67-1.12 2.48.68 6.15 5.98 10.57 13.6 11.33 11.24 20.76-7.64 20.76a21.91 21.91 0 0 0-14.6 5.24c-3.28 2.71-5.8 5.86-9.85 11.82l-1.52 2.25c-3.1 4.57-5.01 7.1-7.32 9.4-6.21 6.21-9.3 7.64-13.05 6.89l-1-.23a10.82 10.82 0 0 0-2.66-.37c-1.6 0-2.41.67-8.18 6.22-4.85 4.67-8.07 6.78-11.82 6.78-1.33 0-3.46 1.15-6.45 3.45-1.27.98-2.68 2.14-4.5 3.7l-4.92 4.29a181.11 181.11 0 0 1-4.54 3.82c-9.33 7.56-15.63 10.2-20.21 6.52-2.7-2.15-4.14-4.51-4.63-7.26-.37-2.04-.26-3.63.29-7.3.87-5.85.65-8.42-1.83-11.6-2.32-2.98-2.96-3.22-3.77-2.39-.25.26-1.35 1.63-1.61 1.94-2.21 2.5-4.85 3.57-9 2.82-4.6-.84-5.57-4.11-4.72-10.09l.24-1.56c.6-3.66.68-4.93.25-5.8-.44-.86-1.9-.94-5.23.4l-.74.29c-13.78 5.54-15.26 6.09-19.43 6.67-6.03.84-9.31-1.6-9.31-7.9zm2 0c0 5 2.14 6.6 7.04 5.92 3.91-.55 5.43-1.1 18.95-6.55l.75-.3c4.17-1.66 6.7-1.54 7.76.58.71 1.43.62 2.76-.06 7l-.24 1.53c-.72 5.04-.06 7.27 3.09 7.84 3.43.62 5.38-.17 7.15-2.18.2-.23 1.34-1.66 1.68-2 1.9-1.96 3.82-1.25 6.78 2.55 2.9 3.74 3.17 6.77 2.22 13.12-1 6.75-.52 9.4 3.62 12.71 3.49 2.8 9.1.45 17.7-6.51 1.35-1.1 2.75-2.28 4.49-3.78l4.93-4.3c1.84-1.58 3.27-2.76 4.58-3.77 3.34-2.56 5.74-3.86 7.67-3.86 3.04 0 5.95-1.9 10.43-6.22l2.46-2.39c.94-.89 1.67-1.56 2.37-2.13 1.81-1.49 3.3-2.26 4.74-2.26 1.03 0 1.81.13 3.1.42.7.16.71.17.96.21 2.96.6 5.45-.55 11.23-6.33 2.2-2.2 4.06-4.65 7.09-9.11l1.52-2.25c4.15-6.11 6.76-9.37 10.22-12.24a23.9 23.9 0 0 1 15.88-5.7c16.87 0 18.62-7.01 6.36-17.23-5.9-4.92-8.12-9.41-6.52-12.93 1.42-3.12 5.67-4.84 11.16-4.84 9.25 0 17-8.34 17-18 0-8.94-2.88-10.79-8.36-6.23-3.94 3.28-9.98 3.59-26.67 2.68l-1.02-.06c-5.09-.27-7.99-.39-10.95-.39-8.88 0-13.76-2.14-19.18-7.5-1-.98-5.26-5.53-6.53-6.79-1.99-1.99-3.86-2.9-5.87-3-2.03-.12-3.06.18-8.15 2.07-4.15 1.55-6.9 2.22-10.27 2.22-4.33 0-6.84 1.46-9.98 5.2-.63.74-2.89 3.6-3.18 3.95a48.29 48.29 0 0 1-4.04 4.46 37.26 37.26 0 0 1-5.24 4.23c-6.26 4.17-8.9 7.91-11.95 15.58l-.57 1.43-.28.74a531.5 531.5 0 0 1-1.56 3.88 77.49 77.49 0 0 1-3.32 7c-3.16 5.88-7.82 8.97-17.63 12.96l-3.92 1.58c-1.6.64-2.84 1.15-4.05 1.67a79.2 79.2 0 0 0-8.3 4.08 54.8 54.8 0 0 0-7.35 5.02c-15.62 12.7-17.74 16.13-17.74 28.23zm133.22-79.76c3.06 1.53 6.54 2.02 10.68 1.7 2.53-.2 4.91-.62 8.8-1.49 5.36-1.19 6.33-1.38 8.33-1.54 2.78-.23 4.82.17 6.29 1.4 1.58 1.31 1.96 2.72 1.26 4.22-.66 1.38-1.05 1.74-5.05 5.07-3.53 2.93-5.03 4.83-5.03 7.09 0 7.3 1.29 10.02 7.83 15.62 3.86 3.3 5.93 6.84 5.28 9.62-.75 3.25-4.96 5.02-12.61 5.02-7.18 0-12.7 4.61-20.03 14.68-.5.7-3.96 5.57-4.94 6.87a38.89 38.89 0 0 1-4.72 5.5c-1.06.98-2.09 1.7-3.1 2.15-2.85 1.26-5.05 1.57-9.83 1.74-7.66.27-10.87 1.45-14.98 7.1-1.58 2.17-3.11 4-4.68 5.6a42.87 
42.87 0 0 1-8.65 6.69c-.15.08-10.69 6.19-14.8 8.83-3.76 2.42-6.45 2.04-8.22-.77-1.28-2.03-1.9-4.54-2.87-10.35-.84-5.08-1.27-7.08-2.06-8.93-.97-2.3-2.21-3.24-4.02-2.88-6.2 1.24-8.95 1.39-10.98.2-2.37-1.4-3.13-4.62-2.62-10.73.16-1.96-1.04-2.87-3.76-3.04-2.24-.13-4.9.2-9.94 1.12l-.69.12c-7.97 1.45-10.72 1.72-12.72.73-2.91-1.43-1.6-5.27 4.23-12.21 5.48-6.53 10.6-10.81 15.76-13.53 3.74-1.97 5.94-2.65 12.16-4.1 7.29-1.72 10.4-3.51 14.04-9.31 2.96-4.75 10.74-18.62 12.14-20.84 3.59-5.67 6.8-9.1 11.05-11.34 2.6-1.38 4.72-2.82 9.17-6.07l1.38-1.01c7.85-5.72 12.3-7.98 17.68-7.98 4.22 0 6.49 1.36 9.13 4.77.34.43 1.67 2.22 2 2.67.85 1.09 1.6 1.98 2.45 2.83a24.29 24.29 0 0 0 6.64 4.78zm-.44.9c-2.8-1.4-5-3.03-6.92-4.97-.87-.9-1.65-1.81-2.51-2.93-.35-.46-1.68-2.25-2.01-2.67-2.47-3.18-4.46-4.38-8.34-4.38-5.09 0-9.4 2.2-17.09 7.78l-1.38 1.01c-4.49 3.29-6.63 4.74-9.3 6.15-4.06 2.15-7.16 5.45-10.66 11-1.39 2.19-9.16 16.05-12.15 20.82-3.79 6.07-7.13 7.98-14.66 9.75-6.13 1.45-8.27 2.1-11.92 4.02-5.04 2.66-10.05 6.86-15.46 13.3-5.43 6.46-6.53 9.69-4.55 10.66 1.7.84 4.48.57 12.1-.81l.7-.13c5.12-.93 7.82-1.27 10.17-1.12 3.21.2 4.92 1.48 4.7 4.11-.48 5.76.2 8.64 2.13 9.78 1.73 1.02 4.34.88 10.27-.31 2.35-.47 4 .78 5.14 3.47.83 1.95 1.27 4 2.07 8.8l.06.36c.94 5.65 1.55 8.11 2.72 9.98 1.46 2.3 3.52 2.6 6.84.46 4.14-2.66 14.69-8.77 14.81-8.85a41.9 41.9 0 0 0 8.46-6.54 47.89 47.89 0 0 0 4.6-5.48c4.32-5.95 7.81-7.23 15.74-7.5 4.66-.17 6.76-.47 9.46-1.67.9-.4 1.85-1.06 2.84-1.96a38.03 38.03 0 0 0 4.6-5.36c.96-1.3 4.4-6.16 4.93-6.87 7.5-10.31 13.22-15.09 20.83-15.09 7.24 0 11.02-1.6 11.64-4.24.54-2.32-1.36-5.55-4.97-8.64-6.75-5.79-8.17-8.79-8.17-16.38 0-2.67 1.64-4.74 5.39-7.86 3.8-3.17 4.23-3.56 4.78-4.73.5-1.06.25-1.99-.99-3.03-2.23-1.85-4.72-1.65-13.76.36-3.93.87-6.35 1.3-8.94 1.5-4.3.34-7.97-.18-11.2-1.8zm-28-3.9c5.65-2.82 8.96-2.2 12.9 1.37.56.5 2.6 2.47 3.02 2.87 4.2 3.89 8.07 5.71 14.3 5.71 11.37 0 14 1.41 16.1 8.09.26.83 1.35 4.6 1.66 5.62.8 2.63 1.64 5.03 2.7 7.6 2.13 5.17 2.64 8.32 1.72 10.24-.77 1.61-2.1 2.18-5.37 2.79-2.32.43-2.8.53-3.85.85-1.85.58-3.35 1.4-4.6 2.66-1 1-2.02 2.13-3.31 3.66-.6.71-2.91 3.5-3.46 4.14-7.2 8.54-12.43 12.35-19.59 12.35-3.76 0-6.95 1.28-10.59 4-1.84 1.37-11.62 10.31-15.22 13.06a73.09 73.09 0 0 1-8.95 5.88c-4.58 2.54-7.35 3.22-8.98 2.23-1.32-.8-1.65-2.07-1.94-5.5a52.53 52.53 0 0 0-.16-1.81c-.54-4.73-2.24-6.86-7.16-6.86-7.11 0-8.85-1.23-9.73-5.41-.96-4.61-2.1-6.7-6.55-9.67-3.97-2.65-4.31-5.42-1.52-8.22 2-2 4.63-3.5 11.35-6.87 6.61-3.3 9.2-4.8 11.1-6.68a39.09 39.09 0 0 0 5.3-6.48c.98-1.5 1.83-3.04 2.88-5.13l2.12-4.3c.91-1.83 1.72-3.37 2.61-4.98 5.74-10.32 10.37-14.78 23.22-21.2zm-22.34 21.7c-.89 1.59-1.69 3.12-2.6 4.94l-2.11 4.3a52.9 52.9 0 0 1-2.94 5.23 40.08 40.08 0 0 1-5.44 6.63c-2 2-4.62 3.51-11.35 6.87-6.6 3.3-9.2 4.8-11.1 6.69-2.33 2.34-2.08 4.37 1.38 6.67 4.7 3.14 5.96 5.46 6.97 10.3.78 3.7 2.09 4.62 8.75 4.62 5.5 0 7.57 2.57 8.15 7.75.06.5.09.82.17 1.84.25 3.06.55 4.17 1.46 4.72 1.2.74 3.69.13 7.98-2.25a72.09 72.09 0 0 0 8.82-5.8c3.55-2.7 13.34-11.65 15.24-13.07 3.79-2.83 7.18-4.19 11.18-4.19 6.77 0 11.8-3.67 18.83-12l3.45-4.13a60.07 60.07 0 0 1 3.37-3.72 11.72 11.72 0 0 1 5.01-2.91c1.1-.34 1.6-.45 3.97-.89 2.95-.55 4.07-1.02 4.65-2.23.76-1.59.28-4.5-1.74-9.43a84.46 84.46 0 0 1-2.74-7.69c-.31-1.03-1.4-4.8-1.66-5.61-1.95-6.2-4.16-7.39-15.14-7.39-6.5 0-10.61-1.93-14.98-5.98-.44-.4-2.46-2.37-3.01-2.86-3.65-3.3-6.52-3.85-11.79-1.21-12.67 6.33-17.15 10.65-22.78 20.8zm55.86 11.93c-2.98 6.45-16.78 15.26-26.74 15.26-5.33 0-7.56-2.98-7.11-7.86.32-3.48 2.1-7.91 3.93-10.61l1.52-2.32a44.95 
44.95 0 0 1 1.88-2.7c3.66-4.8 7.85-7.45 13.62-7.45 9.06 0 15.75 9.52 12.9 15.68zm-.9-.42c2.52-5.47-3.65-14.26-12-14.26-5.4 0-9.33 2.48-12.82 7.06-.6.8-1.17 1.6-1.85 2.64 0 0-1.2 1.87-1.52 2.33-1.74 2.57-3.46 6.85-3.77 10.14-.4 4.33 1.43 6.77 6.12 6.77 9.57 0 23.02-8.58 25.83-14.68zm-69.67 20.74c2.08.18 4.44.81 5.88 1.8 2.12 1.47 2.2 3.6-.26 6.05-5.14 5.15-12.85 4.34-12.85-1.35 0-4.66 3.14-6.84 7.23-6.5zm-.09 1c-3.56-.3-6.14 1.5-6.14 5.5 0 4.58 6.53 5.26 11.15.65 2.03-2.04 1.98-3.43.4-4.52-1.27-.88-3.48-1.47-5.4-1.63zm29.59-225.95c4.64 2.35 17.27 8.24 19.39 9.43a24.14 24.14 0 0 1 7.05 5.64 45.03 45.03 0 0 1 3.75 5.2c2.4 3.78.04 7.66-6.2 11.63-4.97 3.16-12.18 6.3-21.95 9.82-4.84 1.74-19.63 6.68-21.1 7.2-6.59 2.33-14.85.1-25.14-5.86-3.93-2.27-8-5-12.94-8.54-2.23-1.61-9.5-6.99-10.7-7.85a81.21 81.21 0 0 0-8.63-5.7c-4.82-2.6-4.45-6.64.17-12.13 3.27-3.88 4.17-4.67 18.1-16.33a230.2 230.2 0 0 0 8.89-7.74 95.2 95.2 0 0 0 4.72-4.66c5.08-5.43 9.8-6.49 14.97-3.92 2.24 1.1 4.53 2.85 7.43 5.52 1.48 1.37 6.94 6.72 7.98 7.7 5.2 4.91 9.46 8.2 14.2 10.6zm-.46.9c-4.85-2.45-9.18-5.79-14.44-10.76-1.05-1-6.5-6.34-7.97-7.69-2.83-2.61-5.06-4.3-7.2-5.37-4.75-2.36-9-1.4-13.8 3.71a96.18 96.18 0 0 1-4.76 4.71c-2.48 2.3-5.16 4.62-8.92 7.77-13.86 11.6-14.77 12.4-17.98 16.21-4.28 5.08-4.58 8.4-.46 10.61 2.23 1.2 4.9 2.99 8.74 5.77 1.2.87 8.47 6.24 10.7 7.85a154.8 154.8 0 0 0 12.85 8.49c10.06 5.82 18.07 7.98 24.3 5.78 1.48-.52 16.27-5.47 21.1-7.2 9.7-3.5 16.86-6.61 21.75-9.72 5.84-3.71 7.9-7.1 5.9-10.26a44.09 44.09 0 0 0-3.67-5.08 23.16 23.16 0 0 0-6.78-5.42c-2.08-1.16-14.68-7.05-19.36-9.4zm-38.83 8.05c3.11-.37 5.7-.13 8.4.7 2.15.66 2.74.93 8.64 3.77 4.75 2.29 8.39 3.86 13.19 5.56 8.38 2.97 11.32 6.23 8.83 9.76-2.08 2.94-8.04 5.92-17.84 9.18-8.45 2.82-15.48 2.35-21.43-.9-4.65-2.55-8.33-6.5-12.15-12.3-2.9-4.41-2.73-8.2.16-11.06 2.48-2.45 6.87-4.07 12.2-4.7zm.12 1c-5.13.6-9.33 2.16-11.62 4.42-2.53 2.5-2.68 5.77-.02 9.8 3.73 5.68 7.3 9.51 11.8 11.97 5.7 3.11 12.43 3.57 20.62.84 9.59-3.2 15.44-6.12 17.34-8.82 1.94-2.75-.5-5.45-8.35-8.24-4.84-1.72-8.5-3.3-13.28-5.6-5.84-2.81-6.42-3.07-8.5-3.71a18.42 18.42 0 0 0-8-.66zM202.5 500.38c0 4.78-1.45 7.56-4.43 8.93-2.29 1.05-4.55 1.23-10.79 1.2l-1.78-.01c-9.19 0-17-7.65-17-15.5 0-7.59 10.6-10.51 19.74-5.44 2.78 1.55 4.21 1.94 8.57 2.75 4.44.83 5.69 2.27 5.69 8.07zm-1 0c0-5.3-.9-6.34-4.88-7.08-4.45-.83-5.96-1.25-8.86-2.86-8.57-4.76-18.26-2.1-18.26 4.56 0 7.3 7.36 14.5 16 14.5h1.79c6.06.04 8.26-.14 10.36-1.1 2.6-1.2 3.85-3.6 3.85-8.02zm33.33-117.85c3.71-1.31 8.7-2.7 16.1-4.55 2.58-.65 16.53-4.04 20.56-5.05 19.59-4.93 31.55-8.9 38.23-13.35 14.93-9.95 36.87-33.88 43.83-47.8 2.25-4.5 4.65-6.38 7.68-6.25 1.26.06 2.61.45 4.32 1.2a50.81 50.81 0 0 1 3.54 1.7l1.26.63c4.78 2.34 8.38 3.44 12.65 3.44 7.2 0 10.01 3.07 8.35 7.91-1.4 4.06-5.92 8.91-11.1 12.02-8.3 4.98-11.75 17.3-11.75 33.57 0 3.59-1.37 6.28-3.98 8.36-1.98 1.58-4.2 2.6-8.47 4.16l-1.02.37c-4.85 1.75-6.98 2.77-8.68 4.46-5.09 5.1-12.54 7.15-20.35 7.15-1.38 0-2.47.92-3.99 3.1-.29.41-1.32 1.95-1.47 2.18-2.68 3.92-4.93 5.72-8.54 5.72-7.84 0-10.74.93-21.76 6.94-5.18 2.82-8.8 3.58-14.66 3.68-.26 0-.47 0-.92.02-4.82.06-7.12.3-10.51 1.34a73.43 73.43 0 0 0-8.89 3.56c-2.17 1-10.53 5.01-10.23 4.87-7.79 3.7-13.32 5.98-18.9 7.57-12.41 3.55-18.58 2.24-27.42-4.07-2.58-1.85-2.72-4.43-.83-7.62 1.45-2.45 3.9-5.09 8.08-8.97l1.78-1.64c3.92-3.6 4.48-4.11 5.9-5.53 2.32-2.32 3.12-3.5 5.48-7.63 1.93-3.36 3.37-5.11 6.27-7.06 2.3-1.54 5.34-2.98 9.44-4.43zm.34.94c-4.03 1.42-7 2.83-9.22 4.32-2.75 1.85-4.1 3.49-5.96 6.73-2.4 4.2-3.24 5.44-5.64 7.83-1.43 1.44-2 
1.96-5.94 5.57l-1.77 1.63c-4.1 3.82-6.52 6.41-7.9 8.75-1.65 2.79-1.54 4.8.55 6.3 8.6 6.14 14.46 7.38 26.57 3.92 5.5-1.57 11-3.84 18.74-7.51-.3.14 8.06-3.88 10.24-4.88a74.3 74.3 0 0 1 9.01-3.6c3.51-1.09 5.89-1.33 10.8-1.4h.91c5.72-.1 9.18-.83 14.2-3.57 11.16-6.08 14.2-7.06 22.24-7.06 3.19 0 5.2-1.6 7.71-5.28l1.48-2.2c1.7-2.43 3-3.52 4.81-3.52 7.57 0 14.78-2 19.65-6.85 1.83-1.84 4.04-2.9 9.04-4.7l1.02-.37c8.6-3.13 11.79-5.67 11.79-11.58 0-16.6 3.53-29.2 12.24-34.43 5-3 9.35-7.67 10.66-11.48 1.42-4.13-.83-6.59-7.4-6.59-4.45 0-8.19-1.14-13.09-3.54-7.52-3.67-6.78-3.34-8.72-3.43-2.58-.1-4.65 1.52-6.74 5.7-7.04 14.07-29.1 38.14-44.17 48.19-6.81 4.54-18.84 8.52-38.55 13.48-4.03 1.02-17.98 4.4-20.56 5.05-7.37 1.84-12.33 3.23-16 4.52zM252 387.5c2.08 0 4-.2 7.25-.69 5.22-.77 6.64-.9 8.46-.5 2.52.56 3.79 2.35 3.79 5.69 0 4.05-2.27 7.29-6.62 10.11-3.24 2.1-6.53 3.53-14.15 6.4l-.27.1-2.28.86c-3.04 1.16-5.27 2.52-9.33 5.43l-.8.57c-8.19 5.88-13.35 8.03-23.05 8.03-4.98 0-6.88-2.03-5.75-5.62.87-2.81 3.58-6.56 7.8-11.13 1.26-1.37 2.64-2.8 4.15-4.3 3.17-3.14 11.25-10.61 11.45-10.8.46-.47.93-.89 1.4-1.26 3.38-2.71 5.77-3.08 14.18-2.93 1.65.03 2.63.04 3.77.04zm0 1c-1.15 0-2.13-.01-3.79-.04-8.18-.14-10.4.2-13.54 2.71-.44.35-.88.74-1.32 1.18-.2.21-8.3 7.69-11.45 10.82a134.6 134.6 0 0 0-4.12 4.26c-4.12 4.47-6.76 8.12-7.58 10.75-.9 2.88.45 4.32 4.8 4.32 9.46 0 14.44-2.07 22.46-7.84l.8-.57c4.13-2.96 6.42-4.36 9.56-5.56l2.3-.86.25-.1c7.55-2.84 10.8-4.25 13.97-6.3 4.08-2.65 6.16-5.6 6.16-9.27 0-2.89-.97-4.26-3-4.7-1.65-.37-3.05-.25-8.1.5-3.3.5-5.26.7-7.4.7zm112.47-45.34c-1.88 5.44-1.98 6.76-.98 12.76 1.18 7.06-1.38 16.58-5.49 16.58a16.89 16.89 0 0 0-1.51.07l-.64.04c-2.86.18-4.83.17-6.94-.17-6.55-1.06-10.41-5.14-10.41-13.44 0-13.9 2.14-19.69 8.13-26.33a21.9 21.9 0 0 0 2.52-3.75c.59-1.03 2.78-5.13 2.72-5.01 4.44-8.14 7.71-11.53 12.25-10.4 1.17.3 2.2.77 3.58 1.59l1.39.84a20 20 0 0 0 3.1 1.6c.7.27 1.8.32 4.75.26l.72-.01c3.16-.05 4.78.08 5.83.66 1.61.89 1.2 2.56-1.14 4.9a215.9 215.9 0 0 1-3.86 3.76c-10.6 10.1-12.75 12.4-14.02 16.05zm-.94-.32c1.34-3.9 3.46-6.17 14.27-16.46 1.55-1.47 2.73-2.62 3.85-3.73 1.94-1.95 2.17-2.88 1.35-3.33-.82-.45-2.37-.58-5.32-.53l-.72.01c-3.14.06-4.26.02-5.14-.34-1.06-.41-1.97-.9-3.25-1.67l-1.38-.83a12.1 12.1 0 0 0-3.31-1.47c-3.88-.97-6.92 2.17-11.13 9.9.07-.13-2.14 3.98-2.73 5.02a22.71 22.71 0 0 1-2.65 3.92c-5.81 6.47-7.87 12-7.87 25.67 0 7.79 3.48 11.47 9.57 12.45 2.01.33 3.92.34 6.71.16a371.33 371.33 0 0 0 1.23-.07c.42-.03.73-.04.99-.04 3.2 0 5.6-8.9 4.5-15.42-1.02-6.16-.91-7.64 1.03-13.24zm-9.26 12.42c.58.52 2.5 1.9 2.55 1.93 1.96 1.57 2.04 3.31.01 6.36-3.74 5.64-8.83 3.09-8.83-4.55 0-3.81.51-5.67 2.07-6.02 1.18-.26 2 .3 4.2 2.28zm-1.34 1.48c-1.5-1.35-2.23-1.85-2.43-1.8-.17.03-.5 1.23-.5 4.06 0 5.87 2.67 7.21 5.17 3.45 1.5-2.26 1.47-2.84.4-3.7.03.03-1.95-1.4-2.64-2zm222.9-130.19c2.2-1.1 3.67-1.66 5.88-2.36l.28-.09a48.92 48.92 0 0 0 8.79-3.55c4.17-2.08 6.35-1.88 6.96.84.44 2 .2 4.01-1.25 12.7-2.27 13.62-9.16 26.14-21.17 36.3-4.3 3.63-7.41 4.39-9.75 2.44-1.88-1.57-3.1-4.57-4.61-10.48-.3-1.15-1.43-5.83-1.72-6.96a114.18 114.18 0 0 0-2.71-9.22c-2.4-6.82-3.03-10.78-2.1-12.94.77-1.83 2.08-2.24 5.6-2.45 1.49-.09 2.09-.14 2.97-.28l1.95-.33c.72-.12 1.22-.2 1.68-.29 1.1-.2 1.92-.38 2.71-.6 1.7-.49 3.42-1.2 6.49-2.73zm.44.9c-3.11 1.54-4.88 2.29-6.65 2.79-.84.23-1.69.42-2.81.63a108.77 108.77 0 0 1-3.81.63c-.77.13-1.39.19-2.92.28-3.13.18-4.17.51-4.74 1.85-.78 1.84-.2 5.62 2.13 12.2a115.12 115.12 0 0 1 2.74 9.31l1.72 6.96c1.46 5.7 2.62 8.58 4.28 9.96 1.87 1.56 4.49.93 8.47-2.44 11.82-10 18.6-22.3 20.83-35.7 
1.4-8.45 1.65-10.51 1.25-12.31-.41-1.87-1.86-2-5.54-.16a49.87 49.87 0 0 1-8.93 3.6l-.28.1a35.4 35.4 0 0 0-5.74 2.3zm-4.5 6.58c1.37-.32 2.5-.75 3.9-1.42.35-.18 2.57-1.31 3.32-1.67 1.5-.71 2.97-1.31 4.7-1.89 2.7-.9 4.64-.77 5.88.4.98.94 1.34 2.26 1.41 4.18.02.4.02.7.02 1.37 0 5.63-4.63 16.88-11.34 22.75-4.34 3.8-7.31 4.67-9.92 2.52-2.06-1.7-3.5-4.65-6.67-12.91-1.86-4.83-2.05-8.1-.68-10.2 1.12-1.7 2.9-2.36 5.83-2.7l1.26-.12c1.19-.12 1.75-.19 2.3-.31zm-2.1 2.3-1.22.12c-2.4.27-3.7.76-4.39 1.81-.93 1.43-.78 4.1.87 8.38 3.02 7.84 4.41 10.71 6.08 12.09 1.63 1.34 3.64.75 7.33-2.48C584.6 250.77 589 240.08 589 235c0-.64 0-.93-.02-1.29-.05-1.44-.3-2.33-.79-2.8-.6-.57-1.8-.65-3.87.04a37.95 37.95 0 0 0-4.47 1.8c-.72.34-2.93 1.47-3.32 1.66a19.54 19.54 0 0 1-4.3 1.56c-.66.16-1.28.24-2.56.36zm-227.73-88.98c-1.59 4.3-3.54 7.25-7.14 11.4l-2.6 2.97a67.02 67.02 0 0 0-2.63 3.23 46.4 46.4 0 0 0-4.68 7.5c-2.85 5.7-7.14 10.18-12.85 13.89-4.25 2.76-8.25 4.62-15.67 7.59-11.01 4.4-16.43 1.26-27.22-16.4-2.86-4.69-8.8-8.63-17.98-12.66-3-1.33-12.88-5.24-14.43-5.92-4.96-2.18-7.04-3.72-6.42-5.85.67-2.32 5.3-4.05 15.48-6.08 16.63-3.32 26.93-3.82 39.93-3.02 7.9.49 9.67.5 12.74-.26 1.99-.48 3.92-1.3 6-2.6l2.79-1.71c9.86-6.14 12.94-7.96 17.3-9.9 6.03-2.71 10.57-3.32 13.94-1.4 7.2 4.12 7.68 7.7 3.44 19.22zm-1.88-.7c3.95-10.7 3.6-13.26-2.56-16.78-2.66-1.52-6.62-.99-12.12 1.48-4.24 1.9-7.3 3.7-17.07 9.77l-2.79 1.73a22.6 22.6 0 0 1-6.57 2.84c-3.36.81-5.22.8-13.34.3-12.84-.78-22.97-.29-39.41 3-4.9.97-8.45 1.88-10.79 2.75-2.03.76-3.04 1.45-3.17 1.91-.16.57 1.48 1.79 5.3 3.46 1.5.67 11.39 4.58 14.44 5.93 9.52 4.19 15.74 8.3 18.87 13.44 10.35 16.93 14.87 19.56 24.78 15.6 7.3-2.93 11.21-4.75 15.33-7.42 5.42-3.53 9.47-7.75 12.15-13.1 1.44-2.9 3.02-5.4 4.86-7.82a68.95 68.95 0 0 1 2.72-3.33l2.6-2.97c3.46-3.99 5.28-6.75 6.77-10.79zm-6.64-.39c-7.94 12.8-18.53 21.75-33.3 25.23-7.82 1.83-12.47-.79-13.12-5.93-.55-4.45 2.29-9.06 6-9.06 3.02 0 5.6-1.68 15.38-9.16 1.47-1.12 2.57-1.96 3.66-2.74 4.4-3.2 7.77-5.17 10.82-6.08 5.57-1.67 9.33-2.15 11.35-1.22 2.5 1.14 2.22 4.13-.79 8.96zm-.84-.52c2.72-4.4 2.94-6.74 1.21-7.53-1.71-.79-5.32-.33-10.65 1.27-2.9.87-6.2 2.79-10.51 5.92-1.08.79-2.18 1.62-3.65 2.74-10.08 7.72-12.62 9.36-15.98 9.36-3.02 0-5.5 4.02-5 7.94.56 4.5 4.62 6.78 11.89 5.07 14.48-3.4 24.86-12.18 32.69-24.77zM461.17 33.53c13.88 4.96 20.75 4.96 31.62.01 3.02-1.37 5.47-2.94 11-6.82 5.57-3.92 8.05-5.51 11.14-6.92 4.14-1.88 7.78-2.38 11.22-1.28 3.92 1.26 6.2 12.3 6.78 28.45.5 14.2-.52 28.93-2.46 34.2-1.82 4.93-5.86 8.17-11.51 10.02A41.7 41.7 0 0 1 506 93.01c-5.79 0-9 2.4-12.2 7.64-.37.59-1.55 2.6-1.71 2.87-1.75 2.9-3.05 4.33-4.93 4.95-.94.32-2.07.83-3.87 1.74l-2.43 1.23c-1.03.53-1.87.94-2.7 1.34-6.43 3.1-11.73 4.72-17.16 4.72-5.71 0-10.04 2.09-14.02 5.92-1.16 1.11-4.2 4.53-4.63 4.94-2.54 2.44-5.93 4.24-10.85 6.1-1.4.52-5.98 2.13-6.25 2.22l-2.06.78c-.89.36-1.78.63-2.7.81-5.55 1.14-11.14-.54-17.98-4.42-1.27-.73-5.13-3.06-5.76-3.42-2.05-1.16-4.12-1.53-9.09-1.9l-1.73-.15c-4.78-.4-7.68-1.14-10.22-2.97-5-3.61-6.77-7.76-5.65-12.33 1.33-5.42 6.5-11.02 14.85-17.28a169.2 169.2 0 0 1 6.5-4.61c-.33.23 4.33-2.92 5.3-3.6 2.73-1.91 4.8-3.9 12.75-12.04l1.09-1.1c3.49-3.56 5.89-5.89 8.12-7.83 2.9-2.5 4.72-5.95 7.5-13.05l.63-1.61c2.7-6.92 4.28-10 6.87-12.33 1.42-1.28 6.68-6.54 7.93-7.5 3.98-3 8.01-2.73 19.57 1.4zm-.34.94c-11.26-4.02-15-4.28-18.62-1.53-1.19.9-6.4 6.11-7.88 7.43-2.42 2.18-3.96 5.19-6.6 11.95l-.63 1.61c-2.83 7.26-4.72 10.8-7.77 13.45a141.85 141.85 0 0 0-9.16 8.87c-8.02 8.2-10.08 10.2-12.88 12.16-.99.69-5.65 3.84-5.31 3.6-2.5 1.71-4.52 3.13-6.47 
4.59-8.17 6.13-13.23 11.6-14.48 16.72-1.02 4.15.58 7.9 5.26 11.27 2.36 1.7 5.11 2.4 9.72 2.8l1.73.13c5.12.4 7.28.78 9.5 2.05.65.36 4.5 2.7 5.76 3.4 6.66 3.78 12.04 5.4 17.29 4.32.86-.17 1.7-.42 2.52-.75a67 67 0 0 1 2.1-.8c.28-.1 4.86-1.7 6.24-2.22 4.8-1.8 8.08-3.56 10.5-5.88.4-.38 3.44-3.8 4.63-4.94 4.16-4 8.72-6.2 14.72-6.2 5.25 0 10.42-1.59 16.73-4.62.82-.4 1.65-.8 2.68-1.33.12-.06 1.93-.99 2.43-1.23 1.84-.93 3-1.46 4-1.8 1.6-.52 2.76-1.82 4.39-4.52l1.7-2.88c3.39-5.5 6.87-8.11 13.07-8.11 4.45 0 8.73-.49 12.64-1.77 5.4-1.76 9.2-4.8 10.9-9.41 1.87-5.11 2.9-19.75 2.39-33.83-.56-15.53-2.81-26.48-6.08-27.52-3.18-1.02-6.57-.55-10.5 1.23-3.02 1.37-5.47 2.94-11 6.83-5.57 3.92-8.05 5.5-11.14 6.92-11.13 5.05-18.26 5.05-32.38.01zM475 55c5.38 0 7.55-.21 9.72-.96 1.26-.43 9.95-4.8 14.88-6.96 1.9-.82 3.56-2.44 6.6-6.04 2.56-3.04 3.19-3.75 4.4-4.84 3.7-3.35 7.07-3.28 10.22 1.23 6.23 8.9 5.61 15.94.07 27.02a71.26 71.26 0 0 0-2.5 5.48c-.32.8-1 2.7-1.09 2.9-.17.45-.34.81-.54 1.17-.63 1.14-1.56 2.21-4.05 4.7-2.4 2.4-5.16 3.27-11.68 4.33-1.81.3-2.2.36-3 .51-6.02 1.1-9.6 2.69-12.24 6.07-3.57 4.59-7.9 7.48-14.98 10.74-.55.24-1.1.5-1.8.8l-1.78.8a60.08 60.08 0 0 0-7.7 3.9c-2.57 1.6-4.79 2.35-9.42 3.46-8.58 2.06-12.28 3.76-17.37 9.36-5.12 5.64-10.17 7.64-16.63 6.7-5.36-.79-10.63-3.01-23.56-9.48-6.3-3.15-6.43-7.78-1.5-13.56 3.38-3.94 3.52-4.06 19.4-16.44 8.12-6.33 12.97-10.57 16.63-14.88 2.53-2.98 4.2-5.73 4.96-8.3 5.5-18.3 12.5-21.98 22.78-15.56 1.95 1.22 6.61 4.55 7.18 4.9 3.36 2.15 6.52 2.95 13 2.95zm0 2c-6.84 0-10.37-.89-14.08-3.26-.63-.4-5.27-3.71-7.16-4.9-9.05-5.65-14.66-2.7-19.8 14.45-.86 2.87-2.67 5.85-5.35 9.01-3.78 4.45-8.7 8.75-16.94 15.17-15.66 12.21-15.86 12.38-19.1 16.16-4.17 4.9-4.09 8 .88 10.48 12.71 6.35 17.89 8.54 22.94 9.28 5.78.84 10.18-.9 14.87-6.06 5.42-5.96 9.45-7.82 18.38-9.96 4.43-1.07 6.5-1.76 8.83-3.22a61.7 61.7 0 0 1 7.94-4.02l1.78-.8 1.78-.8c6.82-3.13 10.91-5.87 14.24-10.14 3-3.87 7-5.64 13.46-6.82.83-.15 1.21-.21 3.04-.51 6.1-1 8.6-1.78 10.58-3.77 2.36-2.36 3.21-3.34 3.72-4.26.15-.27.29-.56.44-.94.06-.15.75-2.06 1.09-2.9.64-1.6 1.45-3.4 2.57-5.64 5.24-10.49 5.8-16.8.07-24.98-2.4-3.44-4.37-3.48-7.24-.89-1.11 1-1.73 1.7-4.22 4.65-3.24 3.85-5.04 5.59-7.32 6.59-4.82 2.1-13.62 6.53-15.03 7.01-2.44.84-4.79 1.07-10.37 1.07zm-12.7 8.6c5.47 3.9 10.34 3.72 18.23.88 5.39-1.94 5.92-2.1 7.7-2.1 2.5-.01 4.21 1.36 5.24 4.46 1.66 4.98-2.32 8.52-12.3 12.68-2.7 1.13-16.25 6.18-20 7.73-7.86 3.24-13.93 6.42-18.87 10.15-13.02 9.84-18.36 11.93-23.71 9.68a24.67 24.67 0 0 1-3.62-1.98l-1.99-1.28a90.4 90.4 0 0 0-2.24-1.4c-3.33-2-2.82-4.28.85-7.34 1.35-1.13 10.66-7.61 13.53-9.91 7.1-5.69 11.91-11.47 14.41-18.34 3.07-8.45 4.89-12.1 6.8-13.39 1.73-1.16 3.36-.53 6.18 1.9.63.56 3.4 3.08 4.11 3.7 1.93 1.7 3.71 3.15 5.67 4.55zm-.6.8c-1.98-1.42-3.79-2.88-5.74-4.6-.73-.64-3.48-3.16-4.1-3.7-2.5-2.16-3.75-2.65-4.97-1.83-1.66 1.11-3.44 4.7-6.42 12.9-2.57 7.07-7.5 12.99-14.72 18.78-2.91 2.33-12.21 8.8-13.52 9.9-3.22 2.68-3.56 4.17-.97 5.72l2.26 1.4 1.99 1.28c1.47.93 2.48 1.5 3.47 1.91 4.9 2.07 9.96.07 22.72-9.56 5.02-3.79 11.15-7 19.1-10.28 3.76-1.55 17.3-6.6 20-7.72 9.5-3.97 13.14-7.2 11.73-11.44-.9-2.71-2.25-3.8-4.3-3.79-1.6 0-2.15.17-7.36 2.05-8.17 2.94-13.34 3.14-19.16-1.01z'/%3E%3C/svg%3E") -} - -.hover\:bg-gray-50:hover { - --tw-bg-opacity: 1; - background-color: rgb(249 250 251/var(--tw-bg-opacity)) -} - -.hover\:bg-indigo-400:hover { - --tw-bg-opacity: 1; - background-color: #ffa116; -} - -.hover\:bg-indigo-600:hover { - --tw-bg-opacity: 1; - background-color: rgb(79 70 229/var(--tw-bg-opacity)) -} - 
-.hover\:bg-indigo-300:hover { - --tw-bg-opacity: 1; - background-color: rgb(165 180 252/var(--tw-bg-opacity)) -} - -.hover\:bg-green-600:hover { - --tw-bg-opacity: 1; - background-color: rgb(22 163 74/var(--tw-bg-opacity)) -} - -.hover\:bg-green-400:hover { - --tw-bg-opacity: 1; - background-color: rgb(74 222 128/var(--tw-bg-opacity)) -} - -.hover\:bg-yellow-600:hover { - --tw-bg-opacity: 1; - background-color: rgb(202 138 4/var(--tw-bg-opacity)) -} - -.hover\:bg-yellow-400:hover { - --tw-bg-opacity: 1; - background-color: rgb(250 204 21/var(--tw-bg-opacity)) -} - -.hover\:bg-red-600:hover { - --tw-bg-opacity: 1; - background-color: rgb(220 38 38/var(--tw-bg-opacity)) -} - -.hover\:bg-red-400:hover { - --tw-bg-opacity: 1; - background-color: rgb(248 113 113/var(--tw-bg-opacity)) -} - -.hover\:bg-indigo-500:hover { - --tw-bg-opacity: 1; - background-color: rgb(99 102 241/var(--tw-bg-opacity)) -} - -.hover\:bg-gray-100:hover { - --tw-bg-opacity: 1; - background-color: rgb(243 244 246/var(--tw-bg-opacity)) -} - -.hover\:text-gray-600:hover { - --tw-text-opacity: 1; - color: rgb(75 85 99/var(--tw-text-opacity)) -} - -.hover\:text-gray-700:hover { - --tw-text-opacity: 1; - color: rgb(55 65 81/var(--tw-text-opacity)) -} - -.hover\:text-red-500:hover { - --tw-text-opacity: 1; - color: rgb(239 68 68/var(--tw-text-opacity)) -} - -.hover\:text-pink-500:hover { - --tw-text-opacity: 1; - color: rgb(236 72 153/var(--tw-text-opacity)) -} - -.hover\:text-yellow-500:hover { - --tw-text-opacity: 1; - color: rgb(234 179 8/var(--tw-text-opacity)) -} - -.hover\:text-green-500:hover { - --tw-text-opacity: 1; - color: rgb(34 197 94/var(--tw-text-opacity)) -} - -.hover\:text-white:hover { - --tw-text-opacity: 1; - color: rgb(255 255 255/var(--tw-text-opacity)) -} - -.hover\:text-indigo-500:hover { - --tw-text-opacity: 1; - color: #ffa116; -} - -.hover\:text-blue-500:hover { - --tw-text-opacity: 1; - color: rgb(59 130 246/var(--tw-text-opacity)) -} - -.hover\:underline:hover { - -webkit-text-decoration-line: underline; - text-decoration-line: underline -} - -.focus\:w-44:focus { - width: 11rem -} - -.focus\:border-indigo-400:focus { - --tw-border-opacity: 1; - border-color: #ffa116; -} - -.focus\:border-green-400:focus { - --tw-border-opacity: 1; - border-color: rgb(74 222 128/var(--tw-border-opacity)) -} - -.focus\:border-red-400:focus { - --tw-border-opacity: 1; - border-color: rgb(248 113 113/var(--tw-border-opacity)) -} - -.focus\:bg-indigo-500:focus { - --tw-bg-opacity: 1; - background-color: rgb(255 161 22/var(--tw-bg-opacity)); -} - -.focus\:bg-green-500:focus { - --tw-bg-opacity: 1; - background-color: rgb(34 197 94/var(--tw-bg-opacity)) -} - -.focus\:bg-yellow-500:focus { - --tw-bg-opacity: 1; - background-color: rgb(234 179 8/var(--tw-bg-opacity)) -} - -.focus\:bg-red-500:focus { - --tw-bg-opacity: 1; - background-color: rgb(239 68 68/var(--tw-bg-opacity)) -} - -.focus\:bg-indigo-400:focus { - --tw-bg-opacity: 1; - background-color: rgb(129 140 248/var(--tw-bg-opacity)) -} - -.focus\:text-indigo-500:focus { - --tw-text-opacity: 1; - color: rgb(99 102 241/var(--tw-text-opacity)) -} - -.focus\:underline:focus { - -webkit-text-decoration-line: underline; - text-decoration-line: underline -} - -.focus\:outline-none:focus { - outline: 2px solid transparent; - outline-offset: 2px -} - -.focus\:ring:focus { - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width))
var(--tw-ring-color) -} - -.focus\:ring-2:focus,.focus\:ring:focus { - box-shadow: var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000) -} - -.focus\:ring-2:focus { - --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color); - --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color) -} - -.focus\:ring-gray-400:focus { - --tw-ring-opacity: 1; - --tw-ring-color: rgb(156 163 175/var(--tw-ring-opacity)) -} - -.focus\:ring-indigo-300:focus { - --tw-ring-opacity: 1; - --tw-ring-color: #fac678; -} - -.focus\:ring-green-300:focus { - --tw-ring-opacity: 1; - --tw-ring-color: rgb(134 239 172/var(--tw-ring-opacity)) -} - -.focus\:ring-red-300:focus { - --tw-ring-opacity: 1; - --tw-ring-color: rgb(252 165 165/var(--tw-ring-opacity)) -} - -.focus\:ring-indigo-100:focus { - --tw-ring-opacity: 1; - --tw-ring-color: rgb(224 231 255/var(--tw-ring-opacity)) -} - -.focus\:ring-indigo-500:focus { - --tw-ring-opacity: 1; - --tw-ring-color: rgb(99 102 241/var(--tw-ring-opacity)) -} - -.focus\:ring-white:focus { - --tw-ring-opacity: 1; - --tw-ring-color: rgb(255 255 255/var(--tw-ring-opacity)) -} - -.focus\:ring-yellow-300:focus { - --tw-ring-opacity: 1; - --tw-ring-color: rgb(253 224 71/var(--tw-ring-opacity)) -} - -.focus\:ring-opacity-50:focus { - --tw-ring-opacity: 0.5 -} - -.focus\:ring-opacity-40:focus { - --tw-ring-opacity: 0.4 -} - -.active\:outline-none:active { - outline: 2px solid transparent; - outline-offset: 2px -} - -[dir=rtl] .rtl\:right-4 { - right: 1rem -} - -[dir=rtl] .rtl\:left-auto { - left: auto -} - -[dir=rtl] .rtl\:right-auto { - right: auto -} - -[dir=rtl] .rtl\:left-3 { - left: .75rem -} - -[dir=rtl] .rtl\:left-0 { - left: 0 -} - -[dir=rtl] .rtl\:rotate-180 { - --tw-rotate: 180deg; - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) -} - -[dir=rtl] .rtl\:pl-24 { - padding-left: 6rem -} - -[dir=rtl] .rtl\:pr-3 { - padding-right: .75rem -} - -[dir=rtl] .rtl\:pr-14 { - padding-right: 3.5rem -} - -[dir=rtl] .rtl\:text-right { - text-align: right -} - -@media (min-width: 640px) { - .sm\:order-3 { - order:3 - } - - .sm\:mx-4 { - margin-left: 1rem; - margin-right: 1rem - } - - .sm\:ml-2 { - margin-left: .5rem - } - - .sm\:block { - display: block - } - - .sm\:flex { - display: flex - } - - .sm\:h-64 { - height: 16rem - } - - .sm\:h-32 { - height: 8rem - } - - .sm\:w-auto { - width: auto - } - - .sm\:w-32 { - width: 8rem - } - - .sm\:w-80 { - width: 20rem - } - - .sm\:w-64 { - width: 16rem - } - - .sm\:translate-y-0 { - --tw-translate-y: 0px - } - - .sm\:scale-95,.sm\:translate-y-0 { - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) - } - - .sm\:scale-95 { - --tw-scale-x: .95; - --tw-scale-y: .95 - } - - .sm\:scale-100 { - --tw-scale-x: 1; - --tw-scale-y: 1; - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) - } - - .sm\:grid-cols-2 { - grid-template-columns: repeat(2,minmax(0,1fr)) - } - - .sm\:flex-row { - flex-direction: row - } - - .sm\:items-start { - align-items: flex-start - } - - .sm\:justify-between { - justify-content: space-between - } - - 
.sm\:space-y-0>:not([hidden])~:not([hidden]) { - --tw-space-y-reverse: 0; - margin-bottom: calc(0px*var(--tw-space-y-reverse)); - margin-top: calc(0px*(1 - var(--tw-space-y-reverse))) - } - - .sm\:p-0 { - padding: 0 - } - - .sm\:p-3 { - padding: .75rem - } - - .sm\:px-6 { - padding-left: 1.5rem; - padding-right: 1.5rem - } - - .sm\:px-12 { - padding-left: 3rem; - padding-right: 3rem - } - - .sm\:text-xl { - font-size: 1.25rem; - line-height: 1.75rem - } - - .sm\:text-3xl { - font-size: 1.875rem; - line-height: 2.25rem - } - - .sm\:focus\:w-80:focus { - width: 20rem - } -} - -@media (min-width: 768px) { - .md\:my-6 { - margin-bottom:1.5rem; - margin-top: 1.5rem - } - - .md\:mt-0 { - margin-top: 0 - } - - .md\:flex { - display: flex - } - - .md\:h-64 { - height: 16rem - } - - .md\:w-auto { - width: auto - } - - .md\:grid-cols-4 { - grid-template-columns: repeat(4,minmax(0,1fr)) - } - - .md\:items-center { - align-items: center - } - - .md\:justify-between { - justify-content: space-between - } - - .md\:text-9xl { - font-size: 8rem; - line-height: 1 - } - - .md\:text-3xl { - font-size: 1.875rem; - line-height: 2.25rem - } - - .md\:text-2xl { - font-size: 1.5rem; - line-height: 2rem - } -} - -@media (min-width: 1024px) { - .lg\:relative { - position:relative - } - - .lg\:inset-0 { - bottom: 0; - left: 0; - right: 0; - top: 0 - } - - .lg\:inset-x-auto { - left: auto; - right: auto - } - - .lg\:right-8 { - right: 2rem - } - - .lg\:mx-0 { - margin-left: 0; - margin-right: 0 - } - - .lg\:flex { - display: flex - } - - .lg\:hidden { - display: none - } - - .lg\:w-96 { - width: 24rem - } - - .lg\:translate-x-0 { - --tw-translate-x: 0px; - transform: translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y)) - } - - .lg\:items-center { - align-items: center - } - - .lg\:px-0 { - padding-left: 0; - padding-right: 0 - } - - .lg\:px-8 { - padding-left: 2rem; - padding-right: 2rem - } - - [dir=rtl] .rtl\:lg\:left-8 { - left: 2rem - } - - [dir=rtl] .rtl\:lg\:right-auto { - right: auto - } -} - -@media (min-width: 1280px) { - .xl\:right-10 { - right:2.5rem - } - - .xl\:bottom-8 { - bottom: 2rem - } - - .xl\:-mx-3 { - margin-left: -.75rem; - margin-right: -.75rem - } - - .xl\:mx-3 { - margin-left: .75rem; - margin-right: .75rem - } - - .xl\:-mx-8 { - margin-left: -2rem; - margin-right: -2rem - } - - .xl\:flex { - display: flex - } - - .xl\:w-2\/3 { - width: 66.666667% - } - - .xl\:w-1\/3 { - width: 33.333333% - } - - .xl\:grid-cols-4 { - grid-template-columns: repeat(4,minmax(0,1fr)) - } - - .xl\:grid-cols-2 { - grid-template-columns: repeat(2,minmax(0,1fr)) - } - - .xl\:space-y-0>:not([hidden])~:not([hidden]) { - --tw-space-y-reverse: 0; - margin-bottom: calc(0px*var(--tw-space-y-reverse)); - margin-top: calc(0px*(1 - var(--tw-space-y-reverse))) - } - - .xl\:p-6 { - padding: 1.5rem - } - - .xl\:p-8 { - padding: 2rem - } - - [dir=rtl] .rtl\:xl\:left-10 { - left: 2.5rem - } - - [dir=rtl] .rtl\:xl\:right-auto { - right: auto - } -} - -@media (min-width: 1536px) { - .\32xl\:-mx-3 { - margin-left:-.75rem; - margin-right: -.75rem - } - - .\32xl\:mx-3 { - margin-left: .75rem; - margin-right: .75rem - } - - .\32xl\:mt-0 { - margin-top: 0 - } - - .\32xl\:flex { - display: flex - } - - .\32xl\:w-1\/2 { - width: 50% - } - - .\32xl\:max-w-2xl { - max-width: 42rem - } -} - - -/* .dark-bg { - background-color: #1a1a1a; -} - -.dark-nav { - background-color: #282828; -} */ - -#submit { - 
background-color: #ffa116; -} -#submit:hover { - background-color: #fcb140; -} -#submit:focus { - box-shadow: 0 0 5px 2px rgb(255, 188, 87); -} - -#apply { - background-color: #ffa116; -} -#apply:hover { - background-color: #fcb140; -} -#apply:focus { - box-shadow: 0 0 5px 2px rgb(255, 188, 87); -} - -.dark { - filter: invert(92%); -} - -.dark .no-invert { - filter: invert(92%); -} \ No newline at end of file diff --git a/spaces/DragGan/DragGan-Inversion/viz/drag_widget.py b/spaces/DragGan/DragGan-Inversion/viz/drag_widget.py deleted file mode 100644 index 348ab36de2daff2eff97589204b00d391b3a1e7e..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/viz/drag_widget.py +++ /dev/null @@ -1,173 +0,0 @@ -import os -import torch -import numpy as np -import imgui -import dnnlib -from gui_utils import imgui_utils - -# ---------------------------------------------------------------------------- - - -class DragWidget: - def __init__(self, viz): - self.viz = viz - self.point = [-1, -1] - self.points = [] - self.targets = [] - self.is_point = True - self.last_click = False - self.is_drag = False - self.iteration = 0 - self.mode = 'point' - self.r_mask = 50 - self.show_mask = False - self.mask = torch.ones(256, 256) - self.lambda_mask = 20 - self.feature_idx = 5 - self.r1 = 3 - self.r2 = 12 - self.path = os.path.abspath(os.path.join( - os.path.dirname(__file__), '..', '_screenshots')) - self.defer_frames = 0 - self.disabled_time = 0 - - def action(self, click, down, x, y): - if self.mode == 'point': - self.add_point(click, x, y) - elif down: - self.draw_mask(x, y) - - def add_point(self, click, x, y): - if click: - self.point = [y, x] - elif self.last_click: - if self.is_drag: - self.stop_drag() - if self.is_point: - self.points.append(self.point) - self.is_point = False - else: - self.targets.append(self.point) - self.is_point = True - self.last_click = click - - def init_mask(self, w, h): - self.width, self.height = w, h - self.mask = torch.ones(h, w) - - def draw_mask(self, x, y): - X = torch.linspace(0, self.width, self.width) - Y = torch.linspace(0, self.height, self.height) - yy, xx = torch.meshgrid(Y, X) - circle = (xx - x)**2 + (yy - y)**2 < self.r_mask**2 - if self.mode == 'flexible': - self.mask[circle] = 0 - elif self.mode == 'fixed': - self.mask[circle] = 1 - - def stop_drag(self): - self.is_drag = False - self.iteration = 0 - - def set_points(self, points): - self.points = points - - def reset_point(self): - self.points = [] - self.targets = [] - self.is_point = True - - def load_points(self, suffix): - points = [] - point_path = self.path + f'_{suffix}.txt' - try: - with open(point_path, "r") as f: - for line in f.readlines(): - y, x = line.split() - points.append([int(y), int(x)]) - except: - print(f'Wrong point file path: {point_path}') - return points - - @imgui_utils.scoped_by_object_id - def __call__(self, show=True): - viz = self.viz - reset = False - if show: - with imgui_utils.grayed_out(self.disabled_time != 0): - imgui.text('Drag') - imgui.same_line(viz.label_w) - - if imgui_utils.button('Add point', width=viz.button_w, enabled='image' in viz.result): - self.mode = 'point' - - imgui.same_line() - reset = False - if imgui_utils.button('Reset point', width=viz.button_w, enabled='image' in viz.result): - self.reset_point() - reset = True - - imgui.text(' ') - imgui.same_line(viz.label_w) - if imgui_utils.button('Start', width=viz.button_w, enabled='image' in viz.result): - self.is_drag = True - if len(self.points) > len(self.targets): - self.points = 
self.points[:len(self.targets)] - - imgui.same_line() - if imgui_utils.button('Stop', width=viz.button_w, enabled='image' in viz.result): - self.stop_drag() - - imgui.text(' ') - imgui.same_line(viz.label_w) - imgui.text(f'Steps: {self.iteration}') - - imgui.text('Mask') - imgui.same_line(viz.label_w) - if imgui_utils.button('Flexible area', width=viz.button_w, enabled='image' in viz.result): - self.mode = 'flexible' - self.show_mask = True - - imgui.same_line() - if imgui_utils.button('Fixed area', width=viz.button_w, enabled='image' in viz.result): - self.mode = 'fixed' - self.show_mask = True - - imgui.text(' ') - imgui.same_line(viz.label_w) - if imgui_utils.button('Reset mask', width=viz.button_w, enabled='image' in viz.result): - self.mask = torch.ones(self.height, self.width) - imgui.same_line() - _clicked, self.show_mask = imgui.checkbox( - 'Show mask', self.show_mask) - - imgui.text(' ') - imgui.same_line(viz.label_w) - with imgui_utils.item_width(viz.font_size * 6): - changed, self.r_mask = imgui.input_int( - 'Radius', self.r_mask) - - imgui.text(' ') - imgui.same_line(viz.label_w) - with imgui_utils.item_width(viz.font_size * 6): - changed, self.lambda_mask = imgui.input_int( - 'Lambda', self.lambda_mask) - - self.disabled_time = max(self.disabled_time - viz.frame_delta, 0) - if self.defer_frames > 0: - self.defer_frames -= 1 - viz.args.is_drag = self.is_drag - if self.is_drag: - self.iteration += 1 - viz.args.iteration = self.iteration - viz.args.points = [point for point in self.points] - viz.args.targets = [point for point in self.targets] - viz.args.mask = self.mask - viz.args.lambda_mask = self.lambda_mask - viz.args.feature_idx = self.feature_idx - viz.args.r1 = self.r1 - viz.args.r2 = self.r2 - viz.args.reset = reset - - -# ---------------------------------------------------------------------------- diff --git a/spaces/Duskfallcrew/MagicDreamlike/README.md b/spaces/Duskfallcrew/MagicDreamlike/README.md deleted file mode 100644 index f88bf549a9f7fcb4089d82175a7f19734978debb..0000000000000000000000000000000000000000 --- a/spaces/Duskfallcrew/MagicDreamlike/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Dreamlike Art Dreamlike Photoreal 2.0 -emoji: 🌍 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.16.0 -app_file: app.py -pinned: false -duplicated_from: Lawlieties/dreamlike-art-dreamlike-photoreal-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Eddycrack864/Applio-Inference/Fixes/local_fixes.py b/spaces/Eddycrack864/Applio-Inference/Fixes/local_fixes.py deleted file mode 100644 index 8a418076eee6f65fe06eb0f607061796b839c1ee..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/Fixes/local_fixes.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -import sys -import time -import shutil -import requests -import zipfile - -def insert_new_line(file_name, line_to_find, text_to_insert): - lines = [] - with open(file_name, 'r', encoding='utf-8') as read_obj: - lines = read_obj.readlines() - already_exists = False - with open(file_name + '.tmp', 'w', encoding='utf-8') as write_obj: - for i in range(len(lines)): - write_obj.write(lines[i]) - if lines[i].strip() == line_to_find: - # If next line exists and starts with sys.path.append, skip - if i+1 < len(lines) and lines[i+1].strip().startswith("sys.path.append"): - print('It was already fixed! 
Skip adding a line...') - already_exists = True - break - else: - write_obj.write(text_to_insert + '\n') - # If no existing sys.path.append line was found, replace the original file - if not already_exists: - os.replace(file_name + '.tmp', file_name) - return True - else: - # If existing line was found, delete temporary file - os.remove(file_name + '.tmp') - return False - -def replace_in_file(file_name, old_text, new_text): - with open(file_name, 'r', encoding='utf-8') as file: - file_contents = file.read() - - if old_text in file_contents: - file_contents = file_contents.replace(old_text, new_text) - with open(file_name, 'w', encoding='utf-8') as file: - file.write(file_contents) - return True - - return False - -if __name__ == "__main__": - current_path = os.getcwd() - file_name = os.path.join(current_path, "infer", "modules", "train", "extract", "extract_f0_print.py") - line_to_find = 'import numpy as np, logging' - text_to_insert = "sys.path.append(r'" + current_path + "')" - - - success_1 = insert_new_line(file_name, line_to_find, text_to_insert) - if success_1: - print('The first operation was successful!') - else: - print('He skipped the first operation because it was already fixed!') - - file_name = 'infer-web.py' - old_text = 'with gr.Blocks(theme=gr.themes.Soft()) as app:' - new_text = 'with gr.Blocks() as app:' - - success_2 = replace_in_file(file_name, old_text, new_text) - if success_2: - print('The second operation was successful!') - else: - print('The second operation was omitted because it was already fixed!') - - print('Local corrections successful! You should now be able to infer and train locally in Applio RVC Fork.') - - time.sleep(5) - -def find_torchcrepe_directory(directory): - """ - Recursively searches for the topmost folder named 'torchcrepe' within a directory. - Returns the path of the directory found or None if none is found. - """ - for root, dirs, files in os.walk(directory): - if 'torchcrepe' in dirs: - return os.path.join(root, 'torchcrepe') - return None - -def download_and_extract_torchcrepe(): - url = 'https://github.com/maxrmorrison/torchcrepe/archive/refs/heads/master.zip' - temp_dir = 'temp_torchcrepe' - destination_dir = os.getcwd() - - try: - torchcrepe_dir_path = os.path.join(destination_dir, 'torchcrepe') - - if os.path.exists(torchcrepe_dir_path): - print("Skipping the torchcrepe download. 
The folder already exists.") - return - - # Download the file - print("Starting torchcrepe download...") - response = requests.get(url) - - # Raise an error if the GET request was unsuccessful - response.raise_for_status() - print("Download completed.") - - # Save the downloaded file - zip_file_path = os.path.join(temp_dir, 'master.zip') - os.makedirs(temp_dir, exist_ok=True) - with open(zip_file_path, 'wb') as file: - file.write(response.content) - print(f"Zip file saved to {zip_file_path}") - - # Extract the zip file - print("Extracting content...") - with zipfile.ZipFile(zip_file_path, 'r') as zip_file: - zip_file.extractall(temp_dir) - print("Extraction completed.") - - # Locate the torchcrepe folder and move it to the destination directory - torchcrepe_dir = find_torchcrepe_directory(temp_dir) - if torchcrepe_dir: - shutil.move(torchcrepe_dir, destination_dir) - print(f"Moved the torchcrepe directory to {destination_dir}!") - else: - print("The torchcrepe directory could not be located.") - - except Exception as e: - print("Torchcrepe not successfully downloaded", e) - - # Clean up temporary directory - if os.path.exists(temp_dir): - shutil.rmtree(temp_dir) - -# Run the function -download_and_extract_torchcrepe() - -temp_dir = 'temp_torchcrepe' - -if os.path.exists(temp_dir): - shutil.rmtree(temp_dir) diff --git a/spaces/Eddycrack864/Applio-Inference/infer/modules/uvr5/preprocess.py b/spaces/Eddycrack864/Applio-Inference/infer/modules/uvr5/preprocess.py deleted file mode 100644 index 19f11110ea822eeb140fb885c600536290a1adff..0000000000000000000000000000000000000000 --- a/spaces/Eddycrack864/Applio-Inference/infer/modules/uvr5/preprocess.py +++ /dev/null @@ -1,346 +0,0 @@ -import os -import logging - -logger = logging.getLogger(__name__) - -import librosa -import numpy as np -import soundfile as sf -import torch - -from infer.lib.uvr5_pack.lib_v5 import nets_61968KB as Nets -from infer.lib.uvr5_pack.lib_v5 import spec_utils -from infer.lib.uvr5_pack.lib_v5.model_param_init import ModelParameters -from infer.lib.uvr5_pack.lib_v5.nets_new import CascadedNet -from infer.lib.uvr5_pack.utils import inference - - -class AudioPre: - def __init__(self, agg, model_path, device, is_half): - self.model_path = model_path - self.device = device - self.data = { - # Processing Options - "postprocess": False, - "tta": False, - # Constants - "window_size": 512, - "agg": agg, - "high_end_process": "mirroring", - } - mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v2.json") - model = Nets.CascadedASPPNet(mp.param["bins"] * 2) - cpk = torch.load(model_path, map_location="cpu") - model.load_state_dict(cpk) - model.eval() - if is_half: - model = model.half().to(device) - else: - model = model.to(device) - - self.mp = mp - self.model = model - - def _path_audio_(self, music_file, ins_root=None, vocal_root=None, format="flac"): - if ins_root is None and vocal_root is None: - return "No save root." 
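- # Below: the audio is loaded/resampled once per frequency band, the per-band STFTs are combined,
- # the cascaded net predicts an instrumental mask, and instrument/vocal stems are written out
- # (formats other than wav/flac are first written as wav and then converted with ffmpeg).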
- name = os.path.basename(music_file) - if ins_root is not None: - os.makedirs(ins_root, exist_ok=True) - if vocal_root is not None: - os.makedirs(vocal_root, exist_ok=True) - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - bands_n = len(self.mp.param["band"]) - # print(bands_n) - for d in range(bands_n, 0, -1): - bp = self.mp.param["band"][d] - if d == bands_n: # high-end band - ( - X_wave[d], - _, - ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 - music_file, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - if X_wave[d].ndim == 1: - X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) - else: # lower bands - X_wave[d] = librosa.core.resample( - X_wave[d + 1], - self.mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - # Stft of wave source - X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( - X_wave[d], - bp["hl"], - bp["n_fft"], - self.mp.param["mid_side"], - self.mp.param["mid_side_b2"], - self.mp.param["reverse"], - ) - # pdb.set_trace() - if d == bands_n and self.data["high_end_process"] != "none": - input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( - self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] - ) - input_high_end = X_spec_s[d][ - :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : - ] - - X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) - aggresive_set = float(self.data["agg"] / 100) - aggressiveness = { - "value": aggresive_set, - "split_bin": self.mp.param["band"][1]["crop_stop"], - } - with torch.no_grad(): - pred, X_mag, X_phase = inference( - X_spec_m, self.device, self.model, aggressiveness, self.data - ) - # Postprocess - if self.data["postprocess"]: - pred_inv = np.clip(X_mag - pred, 0, np.inf) - pred = spec_utils.mask_silence(pred, pred_inv) - y_spec_m = pred * X_phase - v_spec_m = X_spec_m - y_spec_m - - if ins_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], y_spec_m, input_high_end, self.mp - ) - wav_instrument = spec_utils.cmb_spectrogram_to_wave( - y_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) - logger.info("%s instruments done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - ins_root, - "instrument_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) # - else: - path = os.path.join( - ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - if vocal_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], v_spec_m, input_high_end, self.mp - ) - wav_vocals = spec_utils.cmb_spectrogram_to_wave( - v_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) - logger.info("%s vocals done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - vocal_root, - "vocal_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - else: - path = os.path.join( - vocal_root, 
"vocal_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - - -class AudioPreDeEcho: - def __init__(self, agg, model_path, device, is_half): - self.model_path = model_path - self.device = device - self.data = { - # Processing Options - "postprocess": False, - "tta": False, - # Constants - "window_size": 512, - "agg": agg, - "high_end_process": "mirroring", - } - mp = ModelParameters("infer/lib/uvr5_pack/lib_v5/modelparams/4band_v3.json") - nout = 64 if "DeReverb" in model_path else 48 - model = CascadedNet(mp.param["bins"] * 2, nout) - cpk = torch.load(model_path, map_location="cpu") - model.load_state_dict(cpk) - model.eval() - if is_half: - model = model.half().to(device) - else: - model = model.to(device) - - self.mp = mp - self.model = model - - def _path_audio_( - self, music_file, vocal_root=None, ins_root=None, format="flac" - ): # 3个VR模型vocal和ins是反的 - if ins_root is None and vocal_root is None: - return "No save root." - name = os.path.basename(music_file) - if ins_root is not None: - os.makedirs(ins_root, exist_ok=True) - if vocal_root is not None: - os.makedirs(vocal_root, exist_ok=True) - X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {} - bands_n = len(self.mp.param["band"]) - # print(bands_n) - for d in range(bands_n, 0, -1): - bp = self.mp.param["band"][d] - if d == bands_n: # high-end band - ( - X_wave[d], - _, - ) = librosa.core.load( # 理论上librosa读取可能对某些音频有bug,应该上ffmpeg读取,但是太麻烦了弃坑 - music_file, - bp["sr"], - False, - dtype=np.float32, - res_type=bp["res_type"], - ) - if X_wave[d].ndim == 1: - X_wave[d] = np.asfortranarray([X_wave[d], X_wave[d]]) - else: # lower bands - X_wave[d] = librosa.core.resample( - X_wave[d + 1], - self.mp.param["band"][d + 1]["sr"], - bp["sr"], - res_type=bp["res_type"], - ) - # Stft of wave source - X_spec_s[d] = spec_utils.wave_to_spectrogram_mt( - X_wave[d], - bp["hl"], - bp["n_fft"], - self.mp.param["mid_side"], - self.mp.param["mid_side_b2"], - self.mp.param["reverse"], - ) - # pdb.set_trace() - if d == bands_n and self.data["high_end_process"] != "none": - input_high_end_h = (bp["n_fft"] // 2 - bp["crop_stop"]) + ( - self.mp.param["pre_filter_stop"] - self.mp.param["pre_filter_start"] - ) - input_high_end = X_spec_s[d][ - :, bp["n_fft"] // 2 - input_high_end_h : bp["n_fft"] // 2, : - ] - - X_spec_m = spec_utils.combine_spectrograms(X_spec_s, self.mp) - aggresive_set = float(self.data["agg"] / 100) - aggressiveness = { - "value": aggresive_set, - "split_bin": self.mp.param["band"][1]["crop_stop"], - } - with torch.no_grad(): - pred, X_mag, X_phase = inference( - X_spec_m, self.device, self.model, aggressiveness, self.data - ) - # Postprocess - if self.data["postprocess"]: - pred_inv = np.clip(X_mag - pred, 0, np.inf) - pred = spec_utils.mask_silence(pred, pred_inv) - y_spec_m = pred * X_phase - v_spec_m = X_spec_m - y_spec_m - - if ins_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], y_spec_m, input_high_end, self.mp - ) - wav_instrument = spec_utils.cmb_spectrogram_to_wave( - y_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_instrument = spec_utils.cmb_spectrogram_to_wave(y_spec_m, self.mp) - logger.info("%s instruments done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - ins_root, - 
"instrument_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) # - else: - path = os.path.join( - ins_root, "instrument_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_instrument) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) - if vocal_root is not None: - if self.data["high_end_process"].startswith("mirroring"): - input_high_end_ = spec_utils.mirroring( - self.data["high_end_process"], v_spec_m, input_high_end, self.mp - ) - wav_vocals = spec_utils.cmb_spectrogram_to_wave( - v_spec_m, self.mp, input_high_end_h, input_high_end_ - ) - else: - wav_vocals = spec_utils.cmb_spectrogram_to_wave(v_spec_m, self.mp) - logger.info("%s vocals done" % name) - if format in ["wav", "flac"]: - sf.write( - os.path.join( - vocal_root, - "vocal_{}_{}.{}".format(name, self.data["agg"], format), - ), - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - else: - path = os.path.join( - vocal_root, "vocal_{}_{}.wav".format(name, self.data["agg"]) - ) - sf.write( - path, - (np.array(wav_vocals) * 32768).astype("int16"), - self.mp.param["sr"], - ) - if os.path.exists(path): - os.system( - "ffmpeg -i %s -vn %s -q:a 2 -y" - % (path, path[:-4] + ".%s" % format) - ) diff --git a/spaces/Ekimetrics/climate-question-answering/climateqa/retriever.py b/spaces/Ekimetrics/climate-question-answering/climateqa/retriever.py deleted file mode 100644 index c0ae2eb87affa9646d8128c833e6bfb2c1435a4b..0000000000000000000000000000000000000000 --- a/spaces/Ekimetrics/climate-question-answering/climateqa/retriever.py +++ /dev/null @@ -1,149 +0,0 @@ -# https://github.com/langchain-ai/langchain/issues/8623 - -import pandas as pd - -from langchain.schema.retriever import BaseRetriever, Document -from langchain.vectorstores.base import VectorStoreRetriever -from langchain.vectorstores import VectorStore -from langchain.callbacks.manager import CallbackManagerForRetrieverRun -from typing import List -from pydantic import Field - -class ClimateQARetriever(BaseRetriever): - vectorstore:VectorStore - sources:list = ["IPCC","IPBES"] - threshold:float = 22 - k_summary:int = 3 - k_total:int = 10 - namespace:str = "vectors" - - def get_relevant_documents(self, query: str) -> List[Document]: - - # Check if all elements in the list are either IPCC or IPBES - assert isinstance(self.sources,list) - assert all([x in ["IPCC","IPBES"] for x in self.sources]) - assert self.k_total > self.k_summary, "k_total should be greater than k_summary" - - # Prepare base search kwargs - filters = { - "source": { "$in":self.sources}, - } - - # Search for k_summary documents in the summaries dataset - filters_summaries = { - **filters, - "report_type": { "$in":["SPM","TS"]}, - } - docs_summaries = self.vectorstore.similarity_search_with_score(query=query,namespace = self.namespace,filter = filters_summaries,k = self.k_summary) - docs_summaries = [x for x in docs_summaries if x[1] > self.threshold] - - # Search for k_total - k_summary documents in the full reports dataset - filters_full = { - **filters, - "report_type": { "$nin":["SPM","TS"]}, - } - k_full = self.k_total - len(docs_summaries) - docs_full = self.vectorstore.similarity_search_with_score(query=query,namespace = self.namespace,filter = filters_full,k = k_full) - - # Concatenate documents - docs = docs_summaries + docs_full - - # Filter if scores 
are below threshold - docs = [x for x in docs if x[1] > self.threshold] - - # Add score to metadata - results = [] - for i,(doc,score) in enumerate(docs): - doc.metadata["similarity_score"] = score - doc.metadata["content"] = doc.page_content - doc.metadata["page_number"] = int(doc.metadata["page_number"]) - doc.page_content = f"""Doc {i+1} - {doc.metadata['short_name']}: {doc.page_content}""" - results.append(doc) - - return results - - - - - -# def filter_summaries(df,k_summary = 3,k_total = 10): -# # assert source in ["IPCC","IPBES","ALL"], "source arg should be in (IPCC,IPBES,ALL)" - -# # # Filter by source -# # if source == "IPCC": -# # df = df.loc[df["source"]=="IPCC"] -# # elif source == "IPBES": -# # df = df.loc[df["source"]=="IPBES"] -# # else: -# # pass - -# # Separate summaries and full reports -# df_summaries = df.loc[df["report_type"].isin(["SPM","TS"])] -# df_full = df.loc[~df["report_type"].isin(["SPM","TS"])] - -# # Find passages from summaries dataset -# passages_summaries = df_summaries.head(k_summary) - -# # Find passages from full reports dataset -# passages_fullreports = df_full.head(k_total - len(passages_summaries)) - -# # Concatenate passages -# passages = pd.concat([passages_summaries,passages_fullreports],axis = 0,ignore_index = True) -# return passages - - - - -# def retrieve_with_summaries(query,retriever,k_summary = 3,k_total = 10,sources = ["IPCC","IPBES"],max_k = 100,threshold = 0.555,as_dict = True,min_length = 300): -# assert max_k > k_total - -# validated_sources = ["IPCC","IPBES"] -# sources = [x for x in sources if x in validated_sources] -# filters = { -# "source": { "$in": sources }, -# } -# print(filters) - -# # Retrieve documents -# docs = retriever.retrieve(query,top_k = max_k,filters = filters) - -# # Filter by score -# docs = [{**x.meta,"score":x.score,"content":x.content} for x in docs if x.score > threshold] - -# if len(docs) == 0: -# return [] -# res = pd.DataFrame(docs) -# passages_df = filter_summaries(res,k_summary,k_total) -# if as_dict: -# contents = passages_df["content"].tolist() -# meta = passages_df.drop(columns = ["content"]).to_dict(orient = "records") -# passages = [] -# for i in range(len(contents)): -# passages.append({"content":contents[i],"meta":meta[i]}) -# return passages -# else: -# return passages_df - - - -# def retrieve(query,sources = ["IPCC"],threshold = 0.555,k = 10): - - -# print("hellooooo") - -# # Reformulate queries -# reformulated_query,language = reformulate(query) - -# print(reformulated_query) - -# # Retrieve documents -# passages = retrieve_with_summaries(reformulated_query,retriever,k_total = k,k_summary = 3,as_dict = True,sources = sources,threshold = threshold) -# response = { -# "query":query, -# "reformulated_query":reformulated_query, -# "language":language, -# "sources":passages, -# "prompts":{"init_prompt":init_prompt,"sources_prompt":sources_prompt}, -# } -# return response - diff --git a/spaces/EmbeddedAndrew/examin8/app.py b/spaces/EmbeddedAndrew/examin8/app.py deleted file mode 100644 index 15befba83dd1ad91ab11e5eb1dddb74b97b76dd1..0000000000000000000000000000000000000000 --- a/spaces/EmbeddedAndrew/examin8/app.py +++ /dev/null @@ -1,99 +0,0 @@ -import datetime -import os - -import gradio as gr -import langchain -import weaviate -from langchain.vectorstores import Weaviate - -from chain import get_new_chain1 - -WEAVIATE_URL = os.environ["WEAVIATE_URL"] - - -def get_weaviate_store(): - client = weaviate.Client( - url=WEAVIATE_URL, - additional_headers={"X-OpenAI-Api-Key": 
os.environ["OPENAI_API_KEY"]}, - ) - return Weaviate(client, "Paragraph", "content", attributes=["source"]) - - -def set_openai_api_key(api_key, agent): - if api_key: - os.environ["OPENAI_API_KEY"] = api_key - vectorstore = get_weaviate_store() - qa_chain = get_new_chain1(vectorstore) - os.environ["OPENAI_API_KEY"] = "" - return qa_chain - - -def chat(inp, history, agent): - history = history or [] - if agent is None: - history.append((inp, "Please paste your OpenAI key to use")) - return history, history - print("\n==== date/time: " + str(datetime.datetime.now()) + " ====") - print("inp: " + inp) - history = history or [] - output = agent({"question": inp, "chat_history": history}) - answer = output["answer"] - history.append((inp, answer)) - print(history) - return history, history - - -block = gr.Blocks(css=".gradio-container {background-color: lightblue}") - -with block: - with gr.Row(): - gr.Markdown("

    Elenchos AI

    ") - - openai_api_key_textbox = gr.Textbox( - placeholder="Paste your OpenAI API key (sk-...)", - show_label=False, - lines=1, - type="password", - ) - - chatbot = gr.Chatbot() - - with gr.Row(): - message = gr.Textbox( - label="What would you like to learn about marine biology?", - placeholder="What is an estuary?", - lines=1, - ) - submit = gr.Button(value="Send", variant="secondary").style(full_width=False) - - gr.Examples( - examples=[ - "What are phytoplankton?", - "How does microplastic pollution affect the oceans?", - "What are artificial reefs?", - ], - inputs=message, - ) - - gr.HTML( - """ - This simple application is an implementation of ChatGPT but over an external dataset (in this case, Wikipedia pages on Marine biology).""" - ) - - gr.HTML( - "
    Powered by LangChain 🦜️🔗
    " - ) - - state = gr.State() - agent_state = gr.State() - - submit.click(chat, inputs=[message, state, agent_state], outputs=[chatbot, state]) - message.submit(chat, inputs=[message, state, agent_state], outputs=[chatbot, state]) - - openai_api_key_textbox.change( - set_openai_api_key, - inputs=[openai_api_key_textbox, agent_state], - outputs=[agent_state], - ) - -block.launch(debug=True) diff --git a/spaces/Epoching/GLIDE_Inpaint/app.py b/spaces/Epoching/GLIDE_Inpaint/app.py deleted file mode 100644 index 65591d39c254397914c412996f5865add2f312f0..0000000000000000000000000000000000000000 --- a/spaces/Epoching/GLIDE_Inpaint/app.py +++ /dev/null @@ -1,245 +0,0 @@ -import subprocess -subprocess.run('pip install -e .', shell=True) - -print("Installed the repo!") - -# GLIDE imports -from typing import Tuple - -from IPython.display import display -from PIL import Image -import numpy as np -import torch as th -import torch.nn.functional as F - -from glide_text2im.download import load_checkpoint -from glide_text2im.model_creation import ( - create_model_and_diffusion, - model_and_diffusion_defaults, - model_and_diffusion_defaults_upsampler -) - -# gradio app imports -import gradio as gr - -from torchvision.transforms import ToTensor, ToPILImage -image_to_tensor = ToTensor() -tensor_to_image = ToPILImage() - -# This notebook supports both CPU and GPU. -# On CPU, generating one sample may take on the order of 20 minutes. -# On a GPU, it should be under a minute. - -has_cuda = th.cuda.is_available() -device = th.device('cpu' if not has_cuda else 'cuda') - -# Create base model. -options = model_and_diffusion_defaults() -options['inpaint'] = True -options['use_fp16'] = has_cuda -options['timestep_respacing'] = '100' # use 100 diffusion steps for fast sampling -model, diffusion = create_model_and_diffusion(**options) -model.eval() -if has_cuda: - model.convert_to_fp16() -model.to(device) -model.load_state_dict(load_checkpoint('base-inpaint', device)) -print('total base parameters', sum(x.numel() for x in model.parameters())) - -# Create upsampler model. -options_up = model_and_diffusion_defaults_upsampler() -options_up['inpaint'] = True -options_up['use_fp16'] = has_cuda -options_up['timestep_respacing'] = 'fast27' # use 27 diffusion steps for very fast sampling -model_up, diffusion_up = create_model_and_diffusion(**options_up) -model_up.eval() -if has_cuda: - model_up.convert_to_fp16() -model_up.to(device) -model_up.load_state_dict(load_checkpoint('upsample-inpaint', device)) -print('total upsampler parameters', sum(x.numel() for x in model_up.parameters())) - -# Sampling parameters -batch_size = 1 -guidance_scale = 5.0 - -# Tune this parameter to control the sharpness of 256x256 images. -# A value of 1.0 is sharper, but sometimes results in grainy artifacts. -upsample_temp = 0.997 - -# Create an classifier-free guidance sampling function -def model_fn(x_t, ts, **kwargs): - half = x_t[: len(x_t) // 2] - combined = th.cat([half, half], dim=0) - model_out = model(combined, ts, **kwargs) - eps, rest = model_out[:, :3], model_out[:, 3:] - cond_eps, uncond_eps = th.split(eps, len(eps) // 2, dim=0) - half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps) - eps = th.cat([half_eps, half_eps], dim=0) - return th.cat([eps, rest], dim=1) - -def denoised_fn(x_start): - # Force the model to have the exact right x_start predictions - # for the part of the image which is known. 
- return ( - x_start * (1 - model_kwargs['inpaint_mask']) - + model_kwargs['inpaint_image'] * model_kwargs['inpaint_mask'] - ) - -def show_images(batch: th.Tensor): - """ Display a batch of images inline. """ - scaled = ((batch + 1)*127.5).round().clamp(0,255).to(th.uint8).cpu() - reshaped = scaled.permute(2, 0, 3, 1).reshape([batch.shape[2], -1, 3]) - return Image.fromarray(reshaped.numpy()) - -def read_image(path: str, size: int = 256) -> Tuple[th.Tensor, th.Tensor]: - pil_img = Image.open(path).convert('RGB') - pil_img = pil_img.resize((size, size), resample=Image.BICUBIC) - img = np.array(pil_img) - return th.from_numpy(img)[None].permute(0, 3, 1, 2).float() / 127.5 - 1 - -def pil_to_numpy(pil_img: Image) -> Tuple[th.Tensor, th.Tensor]: - img = np.array(pil_img) - return th.from_numpy(img)[None].permute(0, 3, 1, 2).float() / 127.5 - 1 - -model_kwargs = dict() -def inpaint(input_img, input_img_with_mask, prompt): - - print(prompt) - - # Save as png for later mask detection :) - input_img_256 = input_img.convert('RGB').resize((256, 256), resample=Image.BICUBIC) - input_img_64 = input_img.convert('RGB').resize((64, 64), resample=Image.BICUBIC) - - # Source image we are inpainting - source_image_256 = pil_to_numpy(input_img_256) - source_image_64 = pil_to_numpy(input_img_64) - - # Since gradio doesn't supply which pixels were drawn, we need to find it ourselves! - # Assuming that all black pixels are meant for inpainting. - input_img_with_mask_64 = input_img_with_mask.convert('L').resize((64, 64), resample=Image.BICUBIC) - gray_scale_source_image = image_to_tensor(input_img_with_mask_64) - source_mask_64 = (gray_scale_source_image!=0).float() - source_mask_64_img = tensor_to_image(source_mask_64) - - # The mask should always be a boolean 64x64 mask, and then we - # can upsample it for the second stage. - source_mask_64 = source_mask_64.unsqueeze(0) - source_mask_256 = F.interpolate(source_mask_64, (256, 256), mode='nearest') - - - ############################## - # Sample from the base model # - ############################## - - # Create the text tokens to feed to the model. - tokens = model.tokenizer.encode(prompt) - tokens, mask = model.tokenizer.padded_tokens_and_mask( - tokens, options['text_ctx'] - ) - - # Create the classifier-free guidance tokens (empty) - full_batch_size = batch_size * 2 - uncond_tokens, uncond_mask = model.tokenizer.padded_tokens_and_mask( - [], options['text_ctx'] - ) - - # Pack the tokens together into model kwargs. - global model_kwargs - model_kwargs = dict( - tokens=th.tensor( - [tokens] * batch_size + [uncond_tokens] * batch_size, device=device - ), - mask=th.tensor( - [mask] * batch_size + [uncond_mask] * batch_size, - dtype=th.bool, - device=device, - ), - - # Masked inpainting image - inpaint_image=(source_image_64 * source_mask_64).repeat(full_batch_size, 1, 1, 1).to(device), - inpaint_mask=source_mask_64.repeat(full_batch_size, 1, 1, 1).to(device), - ) - - # Sample from the base model. 
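- # p_sample_loop runs the 100 respaced diffusion steps at 64x64: model_fn applies
- # classifier-free guidance over the doubled (conditional + unconditional) batch,
- # denoised_fn re-imposes the known pixels, and only the first batch_size samples
- # (the conditional half) are kept for upsampling.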
- model.del_cache() - samples = diffusion.p_sample_loop( - model_fn, - (full_batch_size, 3, options["image_size"], options["image_size"]), - device=device, - clip_denoised=True, - progress=True, - model_kwargs=model_kwargs, - cond_fn=None, - denoised_fn=denoised_fn, - )[:batch_size] - model.del_cache() - - ############################## - # Upsample the 64x64 samples # - ############################## - - tokens = model_up.tokenizer.encode(prompt) - tokens, mask = model_up.tokenizer.padded_tokens_and_mask( - tokens, options_up['text_ctx'] - ) - - # Create the model conditioning dict. - model_kwargs = dict( - # Low-res image to upsample. - low_res=((samples+1)*127.5).round()/127.5 - 1, - - # Text tokens - tokens=th.tensor( - [tokens] * batch_size, device=device - ), - mask=th.tensor( - [mask] * batch_size, - dtype=th.bool, - device=device, - ), - - # Masked inpainting image. - inpaint_image=(source_image_256 * source_mask_256).repeat(batch_size, 1, 1, 1).to(device), - inpaint_mask=source_mask_256.repeat(batch_size, 1, 1, 1).to(device), - ) - - # Sample from the base model. - model_up.del_cache() - up_shape = (batch_size, 3, options_up["image_size"], options_up["image_size"]) - up_samples = diffusion_up.p_sample_loop( - model_up, - up_shape, - noise=th.randn(up_shape, device=device) * upsample_temp, - device=device, - clip_denoised=True, - progress=True, - model_kwargs=model_kwargs, - cond_fn=None, - denoised_fn=denoised_fn, - )[:batch_size] - model_up.del_cache() - - return source_mask_64_img, show_images(up_samples) - -gradio_inputs = [gr.inputs.Image(type='pil', - label="Input Image"), - gr.inputs.Image(type='pil', - label="Input Image With Mask"), - gr.inputs.Textbox(label='Conditional Text to Inpaint')] - -# gradio_outputs = [gr.outputs.Image(label='Auto-Detected Mask (From drawn black pixels)')] - -gradio_outputs = [gr.outputs.Image(label='Auto-Detected Mask (From drawn black pixels)'), - gr.outputs.Image(label='Inpainted Image')] -examples = [['grass.png', 'grass_with_mask.png', 'a corgi in a field']] - -title = "GLIDE Inpaint" -description = "[WARNING: Queue times may take 4-6 minutes per person if there's no GPU! If there is a GPU, it'll take around 60 seconds] Using GLIDE to inpaint black regions of an input image! Instructions: 1) For the 'Input Image', upload an image. 2) For the 'Input Image with Mask', draw a black-colored mask (either manually with something like Paint, or by using gradio's built-in image editor & add a black-colored shape) IT MUST BE BLACK COLOR, but doesn't have to be rectangular! This is because it auto-detects the mask based on 0 (black) pixel values! 3) For the Conditional Text, type something you'd like to see the black region get filled in with :)" -article = "

    GLIDE: Towards Photorealistic Image Generation and Editing with Text-Guided Diffusion Models | Github Repo | visitor badge

    " -iface = gr.Interface(fn=inpaint, inputs=gradio_inputs, - outputs=gradio_outputs, - examples=examples, title=title, - description=description, article=article, - enable_queue=True) -iface.launch() \ No newline at end of file diff --git a/spaces/Felix123456/bingo/src/components/button-scroll-to-bottom.tsx b/spaces/Felix123456/bingo/src/components/button-scroll-to-bottom.tsx deleted file mode 100644 index b68ab9c0e48320c356e51a52d11b9ca63909e6c5..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/components/button-scroll-to-bottom.tsx +++ /dev/null @@ -1,34 +0,0 @@ -'use client' - -import * as React from 'react' - -import { cn } from '@/lib/utils' -import { useAtBottom } from '@/lib/hooks/use-at-bottom' -import { Button, type ButtonProps } from '@/components/ui/button' -import { IconArrowDown } from '@/components/ui/icons' - -export function ButtonScrollToBottom({ className, ...props }: ButtonProps) { - const isAtBottom = useAtBottom() - - return ( - - ) -} diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/environments/environment.py b/spaces/Gen-Sim/Gen-Sim/cliport/environments/environment.py deleted file mode 100644 index 16c91ed60474f512e17a8b89b345ba435803c612..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/environments/environment.py +++ /dev/null @@ -1,735 +0,0 @@ -"""Environment class.""" - -import os -import tempfile -import time -import cv2 -import imageio - -import gym -import numpy as np -from cliport.tasks import cameras -from cliport.utils import pybullet_utils -from cliport.utils import utils -import string -import pybullet as p -import tempfile -import random -import sys - -PLACE_STEP = 0.0003 -PLACE_DELTA_THRESHOLD = 0.005 - -UR5_URDF_PATH = 'ur5/ur5.urdf' -UR5_WORKSPACE_URDF_PATH = 'ur5/workspace.urdf' -PLANE_URDF_PATH = 'plane/plane.urdf' - - -class Environment(gym.Env): - """OpenAI Gym-style environment class.""" - - def __init__(self, - assets_root, - task=None, - disp=False, - shared_memory=False, - hz=240, - record_cfg=None): - """Creates OpenAI Gym-style environment with PyBullet. - - Args: - assets_root: root directory of assets. - task: the task to use. If None, the user must call set_task for the - environment to work properly. - disp: show environment with PyBullet's built-in display viewer. - shared_memory: run with shared memory. - hz: PyBullet physics simulation step speed. Set to 480 for deformables. - - Raises: - RuntimeError: if pybullet cannot load fileIOPlugin. 
- """ - self.curr_video = [] - self.pix_size = 0.003125 - self.obj_ids = {'fixed': [], 'rigid': [], 'deformable': []} - self.objects = self.obj_ids # make a copy - - self.homej = np.array([-1, -0.5, 0.5, -0.5, -0.5, 0]) * np.pi - self.agent_cams = cameras.RealSenseD415.CONFIG - self.record_cfg = record_cfg - self.save_video = True - self.step_counter = 0 - - self.assets_root = assets_root - - color_tuple = [ - gym.spaces.Box(0, 255, config['image_size'] + (3,), dtype=np.uint8) - for config in self.agent_cams - ] - depth_tuple = [ - gym.spaces.Box(0.0, 20.0, config['image_size'], dtype=np.float32) - for config in self.agent_cams - ] - self.observation_space = gym.spaces.Dict({ - 'color': gym.spaces.Tuple(color_tuple), - 'depth': gym.spaces.Tuple(depth_tuple), - }) - self.position_bounds = gym.spaces.Box( - low=np.array([0.25, -0.5, 0.], dtype=np.float32), - high=np.array([0.75, 0.5, 0.28], dtype=np.float32), - dtype=np.float32) - self.bounds = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.3]]) - - self.action_space = gym.spaces.Dict({ - 'pose0': - gym.spaces.Tuple( - (self.position_bounds, - gym.spaces.Box(-1.0, 1.0, shape=(4,), dtype=np.float32))), - 'pose1': - gym.spaces.Tuple( - (self.position_bounds, - gym.spaces.Box(-1.0, 1.0, shape=(4,), dtype=np.float32))) - }) - - # Start PyBullet. - disp_option = p.DIRECT - if disp: - disp_option = p.GUI - if shared_memory: - disp_option = p.SHARED_MEMORY - client = p.connect(disp_option) - file_io = p.loadPlugin('fileIOPlugin', physicsClientId=client) - if file_io < 0: - raise RuntimeError('pybullet: cannot load FileIO!') - if file_io >= 0: - p.executePluginCommand( - file_io, - textArgument=assets_root, - intArgs=[p.AddFileIOAction], - physicsClientId=client) - - p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0) - p.setPhysicsEngineParameter(enableFileCaching=0) - p.setAdditionalSearchPath(assets_root) - p.setAdditionalSearchPath(tempfile.gettempdir()) - p.setTimeStep(1. / hz) - - # If using --disp, move default camera closer to the scene. - if disp: - target = p.getDebugVisualizerCamera()[11] - p.resetDebugVisualizerCamera( - cameraDistance=1.1, - cameraYaw=90, - cameraPitch=-25, - cameraTargetPosition=target) - - if task: - self.set_task(task) - - def __del__(self): - if hasattr(self, 'video_writer'): - self.video_writer.close() - - @property - def is_static(self): - """Return true if objects are no longer moving.""" - v = [np.linalg.norm(p.getBaseVelocity(i)[0]) - for i in self.obj_ids['rigid']] - return all(np.array(v) < 5e-3) - - def fill_dummy_template(self, template): - """check if there are empty templates that haven't been fulfilled yet. if so. 
fill in dummy numbers """ - full_template_path = os.path.join(self.assets_root, template) - with open(full_template_path, 'r') as file: - fdata = file.read() - - fill = False - for field in ['DIMH', 'DIMR', 'DIMX', 'DIMY', 'DIMZ', 'DIM']: - # usually 3 should be enough - if field in fdata: - default_replace_vals = np.random.uniform(0.03, 0.05, size=(3,)).tolist() # [0.03,0.03,0.03] - for i in range(len(default_replace_vals)): - fdata = fdata.replace(f'{field}{i}', str(default_replace_vals[i])) - fill = True - - for field in ['HALF']: - # usually 3 should be enough - if field in fdata: - default_replace_vals = np.random.uniform(0.01, 0.03, size=(3,)).tolist() # [0.015,0.015,0.015] - for i in range(len(default_replace_vals)): - fdata = fdata.replace(f'{field}{i}', str(default_replace_vals[i])) - fill = True - - if fill: - alphabet = string.ascii_lowercase + string.digits - rname = ''.join(random.choices(alphabet, k=16)) - tmpdir = tempfile.gettempdir() - template_filename = os.path.split(template)[-1] - fname = os.path.join(tmpdir, f'{template_filename}.{rname}') - with open(fname, 'w') as file: - file.write(fdata) - # print("fill-in dummys") - - return fname - else: - return template - - def add_object(self, urdf, pose, category='rigid', color=None, **kwargs): - """List of (fixed, rigid, or deformable) objects in env.""" - fixed_base = 1 if category == 'fixed' else 0 - - if 'template' in urdf: - if not os.path.exists(os.path.join(self.assets_root, urdf)): - urdf = urdf.replace("-template", "") - - urdf = self.fill_dummy_template(urdf) - - if not os.path.exists(os.path.join(self.assets_root, urdf)): - print(f"missing urdf error: {os.path.join(self.assets_root, urdf)}. use dummy block.") - urdf = 'stacking/block.urdf' - - obj_id = pybullet_utils.load_urdf( - p, - os.path.join(self.assets_root, urdf), - pose[0], - pose[1], - useFixedBase=fixed_base) - - if not obj_id is None: - self.obj_ids[category].append(obj_id) - - if color is not None: - if type(color) is str: - color = utils.COLORS[color] - color = color + [1.] - p.changeVisualShape(obj_id, -1, rgbaColor=color) - - if hasattr(self, 'record_cfg') and 'blender_render' in self.record_cfg and self.record_cfg['blender_render']: - # print("urdf:", os.path.join(self.assets_root, urdf)) - # if color is None: - # color = (0.5,0.5,0.5,1) # by default - print("color:", color) - - self.blender_recorder.register_object(obj_id, os.path.join(self.assets_root, urdf), color=color) - - return obj_id - - def set_color(self, obj_id, color): - p.changeVisualShape(obj_id, -1, rgbaColor=color + [1]) - - def set_object_color(self, *args, **kwargs): - return self.set_color(*args, **kwargs) - - # --------------------------------------------------------------------------- - # Standard Gym Functions - # --------------------------------------------------------------------------- - - def seed(self, seed=None): - self._random = np.random.RandomState(seed) - return seed - - def reset(self): - """Performs common reset functionality for all supported tasks.""" - if not self.task: - raise ValueError('environment task must be set. Call set_task or pass ' - 'the task arg in the environment constructor.') - self.obj_ids = {'fixed': [], 'rigid': [], 'deformable': []} - p.resetSimulation(p.RESET_USE_DEFORMABLE_WORLD) - p.setGravity(0, 0, -9.8) - - # Temporarily disable rendering to load scene faster. 
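- # Rendering stays disabled while the plane, workspace, UR5 arm and suction end effector
- # URDFs are loaded, and is switched back on once task.reset() has built the scene.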
- p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0) - - plane = pybullet_utils.load_urdf(p, os.path.join(self.assets_root, PLANE_URDF_PATH), - [0, 0, -0.001]) - workspace = pybullet_utils.load_urdf( - p, os.path.join(self.assets_root, UR5_WORKSPACE_URDF_PATH), [0.5, 0, 0]) - - # Load UR5 robot arm equipped with suction end effector. - # TODO(andyzeng): add back parallel-jaw grippers. - self.ur5 = pybullet_utils.load_urdf( - p, os.path.join(self.assets_root, UR5_URDF_PATH)) - self.ee = self.task.ee(self.assets_root, self.ur5, 9, self.obj_ids) - self.ee_tip = 10 # Link ID of suction cup. - - if hasattr(self, 'record_cfg') and 'blender_render' in self.record_cfg and self.record_cfg['blender_render']: - from misc.pyBulletSimRecorder import PyBulletRecorder - self.blender_recorder = PyBulletRecorder() - - self.blender_recorder.register_object(plane, os.path.join(self.assets_root, PLANE_URDF_PATH)) - self.blender_recorder.register_object(workspace, os.path.join(self.assets_root, UR5_WORKSPACE_URDF_PATH)) - self.blender_recorder.register_object(self.ur5, os.path.join(self.assets_root, UR5_URDF_PATH)) - - self.blender_recorder.register_object(self.ee.base, self.ee.base_urdf_path) - if hasattr(self.ee, 'body'): - self.blender_recorder.register_object(self.ee.body, self.ee.urdf_path) - - - # Get revolute joint indices of robot (skip fixed joints). - n_joints = p.getNumJoints(self.ur5) - joints = [p.getJointInfo(self.ur5, i) for i in range(n_joints)] - self.joints = [j[0] for j in joints if j[2] == p.JOINT_REVOLUTE] - - # Move robot to home joint configuration. - for i in range(len(self.joints)): - p.resetJointState(self.ur5, self.joints[i], self.homej[i]) - - # Reset end effector. - self.ee.release() - - # Reset task. - self.task.reset(self) - - # Re-enable rendering. - p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1) - - self.step() - # obs, _, _, _ = self.step() - # return obs - - def step(self, action=None): - """Execute action with specified primitive. - - Args: - action: action to execute. - - Returns: - (obs, reward, done, info) tuple containing MDP step data. - """ - if action is not None: - timeout = self.task.primitive(self.movej, self.movep, self.ee, action['pose0'], action['pose1']) - - # Exit early if action times out. We still return an observation - # so that we don't break the Gym API contract. - if timeout: - obs = {'color': (), 'depth': ()} - for config in self.agent_cams: - color, depth, _ = self.render_camera(config) - obs['color'] += (color,) - obs['depth'] += (depth,) - - return obs, 0.0, True, self.info - - start_time = time.time() - # Step simulator asynchronously until objects settle. - while not self.is_static: - self.step_simulation() - if time.time() - start_time > 5: # timeout - break - - # Get task rewards. - reward, info = self.task.reward() if action is not None else (0, {}) - done = self.task.done() - - # Add ground truth robot state into info. 
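- # self.info maps each object id to (position, rotation, dimensions) and also carries
- # the current language goal.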
- info.update(self.info) - - obs = self._get_obs() - - # if not os.path.exists(self.record_cfg['save_video_path']): - # os.mkdir(self.record_cfg['save_video_path']) - # self.video_path = os.path.join(self.record_cfg['save_video_path'], "123.mp4") - # video_writer = imageio.get_writer(self.video_path, - # fps=self.record_cfg['fps'], - # format='FFMPEG', - # codec='h264', ) - # print(f"has {len(self.curr_video)} frames to save") - # for color in self.curr_video: - # video_writer.append_data(color) - # video_writer.close() - # print("save video to ", self.video_path) - # self.video_path = None - - - self.cur_obs = obs - self.cur_reward = reward - self.cur_done = done - self.cur_info = info - # yield "Task Generated ==> Asset Generated ==> API Reviewed ==> Error Reviewed ==> Code Generated ==> Running Simulation", self.generated_code, self.video_path - - - def step_simulation(self): - p.stepSimulation() - self.step_counter += 1 - - if self.save_video and self.step_counter % 60 == 0: - self.add_video_frame() - - def render(self, mode='rgb_array'): - # Render only the color image from the first camera. - # Only support rgb_array for now. - if mode != 'rgb_array': - raise NotImplementedError('Only rgb_array implemented') - color, _, _ = self.render_camera(self.agent_cams[0]) - - return color - - def render_camera_without_seg(self, config, image_size=None, shadow=1): - """Render RGB-D image with specified camera configuration.""" - if not image_size: - image_size = config['image_size'] - - # OpenGL camera settings. - lookdir = np.float32([0, 0, 1]).reshape(3, 1) - updir = np.float32([0, -1, 0]).reshape(3, 1) - rotation = p.getMatrixFromQuaternion(config['rotation']) - rotm = np.float32(rotation).reshape(3, 3) - lookdir = (rotm @ lookdir).reshape(-1) - updir = (rotm @ updir).reshape(-1) - lookat = config['position'] + lookdir - focal_len = config['intrinsics'][0] - znear, zfar = config['zrange'] - viewm = p.computeViewMatrix(config['position'], lookat, updir) - fovh = (image_size[0] / 2) / focal_len - fovh = (640 / 2) / focal_len - fovh = 180 * np.arctan(fovh) * 2 / np.pi - - # Notes: 1) FOV is vertical FOV 2) aspect must be float - aspect_ratio = image_size[1] / image_size[0] - projm = p.computeProjectionMatrixFOV(fovh, aspect_ratio, znear, zfar) - - # Render with OpenGL camera settings. - _, _, color, depth, segm = p.getCameraImage( - width=image_size[1], - height=image_size[0], - viewMatrix=viewm, - projectionMatrix=projm, - shadow=shadow, - flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX, - renderer=p.ER_BULLET_HARDWARE_OPENGL) - - # Get color image. - color_image_size = (image_size[0], image_size[1], 4) - color = np.array(color, dtype=np.uint8).reshape(color_image_size) - color = color[:, :, :3] # remove alpha channel - if config['noise']: - color = np.int32(color) - color += np.int32(self._random.normal(0, 3, image_size)) - color = np.uint8(np.clip(color, 0, 255)) - - # Get depth image. - depth_image_size = (image_size[0], image_size[1]) - zbuffer = np.array(depth).reshape(depth_image_size) - depth = (zfar + znear - (2. * zbuffer - 1.) * (zfar - znear)) - depth = (2. * znear * zfar) / depth - if config['noise']: - depth += self._random.normal(0, 0.003, depth_image_size) - - # Get segmentation image. - segm = np.uint8(segm).reshape(depth_image_size) - - return color, depth, segm - - def render_camera(self, config, image_size=None, shadow=1): - """Render RGB-D image with specified camera configuration.""" - if not image_size: - image_size = config['image_size'] - - # OpenGL camera settings. 
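- # The camera quaternion rotates the canonical look/up vectors to build the view matrix;
- # the vertical FOV is 2*atan((H/2)/fx) in degrees, and the z-buffer is converted back to
- # metric depth via 2*zn*zf / (zf + zn - (2*z - 1)*(zf - zn)).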
- lookdir = np.float32([0, 0, 1]).reshape(3, 1) - updir = np.float32([0, -1, 0]).reshape(3, 1) - rotation = p.getMatrixFromQuaternion(config['rotation']) - rotm = np.float32(rotation).reshape(3, 3) - lookdir = (rotm @ lookdir).reshape(-1) - updir = (rotm @ updir).reshape(-1) - lookat = config['position'] + lookdir - focal_len = config['intrinsics'][0] - znear, zfar = config['zrange'] - viewm = p.computeViewMatrix(config['position'], lookat, updir) - fovh = (image_size[0] / 2) / focal_len - fovh = 180 * np.arctan(fovh) * 2 / np.pi - - # Notes: 1) FOV is vertical FOV 2) aspect must be float - aspect_ratio = image_size[1] / image_size[0] - projm = p.computeProjectionMatrixFOV(fovh, aspect_ratio, znear, zfar) - - # Render with OpenGL camera settings. - _, _, color, depth, segm = p.getCameraImage( - width=image_size[1], - height=image_size[0], - viewMatrix=viewm, - projectionMatrix=projm, - shadow=shadow, - flags=p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX, - renderer=p.ER_BULLET_HARDWARE_OPENGL) - - # Get color image. - color_image_size = (image_size[0], image_size[1], 4) - color = np.array(color, dtype=np.uint8).reshape(color_image_size) - color = color[:, :, :3] # remove alpha channel - if config['noise']: - color = np.int32(color) - color += np.int32(self._random.normal(0, 3, image_size)) - color = np.uint8(np.clip(color, 0, 255)) - - # Get depth image. - depth_image_size = (image_size[0], image_size[1]) - zbuffer = np.array(depth).reshape(depth_image_size) - depth = (zfar + znear - (2. * zbuffer - 1.) * (zfar - znear)) - depth = (2. * znear * zfar) / depth - if config['noise']: - depth += self._random.normal(0, 0.003, depth_image_size) - - # Get segmentation image. - segm = np.uint8(segm).reshape(depth_image_size) - - return color, depth, segm - - @property - def info(self): - """Environment info variable with object poses, dimensions, and colors.""" - - # Some tasks create and remove zones, so ignore those IDs. - # removed_ids = [] - # if (isinstance(self.task, tasks.names['cloth-flat-notarget']) or - # isinstance(self.task, tasks.names['bag-alone-open'])): - # removed_ids.append(self.task.zone_id) - - info = {} # object id : (position, rotation, dimensions) - for obj_ids in self.obj_ids.values(): - for obj_id in obj_ids: - pos, rot = p.getBasePositionAndOrientation(obj_id) - dim = p.getVisualShapeData(obj_id)[0][3] - info[obj_id] = (pos, rot, dim) - - info['lang_goal'] = self.get_lang_goal() - return info - - def set_task(self, task): - task.set_assets_root(self.assets_root) - self.task = task - - def get_task_name(self): - return type(self.task).__name__ - - def get_lang_goal(self): - if self.task: - return self.task.get_lang_goal() - else: - raise Exception("No task for was set") - - # --------------------------------------------------------------------------- - # Robot Movement Functions - # --------------------------------------------------------------------------- - - def movej(self, targj, speed=0.01, timeout=5): - """Move UR5 to target joint configuration.""" - if self.save_video: - timeout = timeout * 2 # 50? 
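- # Position-servo every revolute joint toward targj at a constant joint-space speed,
- # stepping the simulation until all joint errors are below 1e-2 rad or the timeout
- # elapses (returns True on timeout, False on success).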
- - t0 = time.time() - while (time.time() - t0) < timeout: - currj = [p.getJointState(self.ur5, i)[0] for i in self.joints] - currj = np.array(currj) - diffj = targj - currj - if all(np.abs(diffj) < 1e-2): - return False - - # Move with constant velocity - norm = np.linalg.norm(diffj) - v = diffj / norm if norm > 0 else 0 - stepj = currj + v * speed - gains = np.ones(len(self.joints)) - p.setJointMotorControlArray( - bodyIndex=self.ur5, - jointIndices=self.joints, - controlMode=p.POSITION_CONTROL, - targetPositions=stepj, - positionGains=gains) - self.step_counter += 1 - self.step_simulation() - - print(f'Warning: movej exceeded {timeout} second timeout. Skipping.') - return True - - def start_rec(self, video_filename): - assert self.record_cfg - - # make video directory - if not os.path.exists(self.record_cfg['save_video_path']): - os.makedirs(self.record_cfg['save_video_path']) - - # close and save existing writer - if hasattr(self, 'video_writer'): - self.video_writer.close() - - # initialize writer - self.video_writer = imageio.get_writer(os.path.join(self.record_cfg['save_video_path'], - f"{video_filename}.mp4"), - fps=self.record_cfg['fps'], - format='FFMPEG', - codec='h264',) - p.setRealTimeSimulation(False) - self.save_video = True - - def end_rec(self): - if hasattr(self, 'video_writer'): - self.video_writer.close() - - p.setRealTimeSimulation(True) - self.save_video = False - - def add_video_frame(self): - # Render frame. - config = self.agent_cams[0] - image_size = (self.record_cfg['video_height'], self.record_cfg['video_width']) - color, _, _ = self.render_camera_without_seg(config, image_size, shadow=0) - color = np.array(color) - - # if hasattr(self.record_cfg, 'blender_render') and self.record_cfg['blender_render']: - # print("add blender key frame") - # self.blender_recorder.add_keyframe() - - # Add language instruction to video. - # if self.record_cfg['add_text']: - # lang_goal = self.get_lang_goal() - # reward = f"Success: {self.task.get_reward():.3f}" - # - # font = cv2.FONT_HERSHEY_DUPLEX - # font_scale = 0.65 - # font_thickness = 1 - # - # # Write language goal. - # line_length = 60 - # for i in range(len(lang_goal) // line_length + 1): - # lang_textsize = cv2.getTextSize(lang_goal[i*line_length:(i+1)*line_length], font, font_scale, font_thickness)[0] - # lang_textX = (image_size[1] - lang_textsize[0]) // 2 - # color = cv2.putText(color, lang_goal[i*line_length:(i+1)*line_length], org=(lang_textX, 570+i*30), # 600 - # fontScale=font_scale, - # fontFace=font, - # color=(0, 0, 0), - # thickness=font_thickness, lineType=cv2.LINE_AA) - # - # ## Write Reward. - # # reward_textsize = cv2.getTextSize(reward, font, font_scale, font_thickness)[0] - # # reward_textX = (image_size[1] - reward_textsize[0]) // 2 - # # - # # color = cv2.putText(color, reward, org=(reward_textX, 634), - # # fontScale=font_scale, - # # fontFace=font, - # # color=(0, 0, 0), - # # thickness=font_thickness, lineType=cv2.LINE_AA) - # - # color = np.array(color) - # - # if 'add_task_text' in self.record_cfg and self.record_cfg['add_task_text']: - # lang_goal = self.get_task_name() - # reward = f"Success: {self.task.get_reward():.3f}" - # - # font = cv2.FONT_HERSHEY_DUPLEX - # font_scale = 1 - # font_thickness = 2 - # - # # Write language goal. 
- # lang_textsize = cv2.getTextSize(lang_goal, font, font_scale, font_thickness)[0] - # lang_textX = (image_size[1] - lang_textsize[0]) // 2 - # - # color = cv2.putText(color, lang_goal, org=(lang_textX, 600), - # fontScale=font_scale, - # fontFace=font, - # color=(255, 0, 0), - # thickness=font_thickness, lineType=cv2.LINE_AA) - # - # color = np.array(color) - - self.curr_video.append(color) - # self.video_writer.append_data(color) - - def movep(self, pose, speed=0.01): - """Move UR5 to target end effector pose.""" - targj = self.solve_ik(pose) - return self.movej(targj, speed) - - def solve_ik(self, pose): - """Calculate joint configuration with inverse kinematics.""" - joints = p.calculateInverseKinematics( - bodyUniqueId=self.ur5, - endEffectorLinkIndex=self.ee_tip, - targetPosition=pose[0], - targetOrientation=pose[1], - lowerLimits=[-3 * np.pi / 2, -2.3562, -17, -17, -17, -17], - upperLimits=[-np.pi / 2, 0, 17, 17, 17, 17], - jointRanges=[np.pi, 2.3562, 34, 34, 34, 34], # * 6, - restPoses=np.float32(self.homej).tolist(), - maxNumIterations=100, - residualThreshold=1e-5) - joints = np.float32(joints) - joints[2:] = (joints[2:] + np.pi) % (2 * np.pi) - np.pi - return joints - - def _get_obs(self): - # Get RGB-D camera image observations. - obs = {'color': (), 'depth': ()} - for config in self.agent_cams: - color, depth, _ = self.render_camera(config) - obs['color'] += (color,) - obs['depth'] += (depth,) - - return obs - - def get_object_pose(self, obj_id): - return p.getBasePositionAndOrientation(obj_id) - - def get_object_size(self, obj_id): - """ approximate object's size using AABB """ - aabb_min, aabb_max = p.getAABB(obj_id) - - size_x = aabb_max[0] - aabb_min[0] - size_y = aabb_max[1] - aabb_min[1] - size_z = aabb_max[2] - aabb_min[2] - return size_z * size_y * size_x - - - -class EnvironmentNoRotationsWithHeightmap(Environment): - """Environment that disables any rotations and always passes [0, 0, 0, 1].""" - - def __init__(self, - assets_root, - task=None, - disp=False, - shared_memory=False, - hz=240): - super(EnvironmentNoRotationsWithHeightmap, - self).__init__(assets_root, task, disp, shared_memory, hz) - - heightmap_tuple = [ - gym.spaces.Box(0.0, 20.0, (320, 160, 3), dtype=np.float32), - gym.spaces.Box(0.0, 20.0, (320, 160), dtype=np.float32), - ] - self.observation_space = gym.spaces.Dict({ - 'heightmap': gym.spaces.Tuple(heightmap_tuple), - }) - self.action_space = gym.spaces.Dict({ - 'pose0': gym.spaces.Tuple((self.position_bounds,)), - 'pose1': gym.spaces.Tuple((self.position_bounds,)) - }) - - def step(self, action=None): - """Execute action with specified primitive. - - Args: - action: action to execute. - - Returns: - (obs, reward, done, info) tuple containing MDP step data. 
- """ - if action is not None: - action = { - 'pose0': (action['pose0'][0], [0., 0., 0., 1.]), - 'pose1': (action['pose1'][0], [0., 0., 0., 1.]), - } - return super(EnvironmentNoRotationsWithHeightmap, self).step(action) - - def _get_obs(self): - obs = {} - - color_depth_obs = {'color': (), 'depth': ()} - for config in self.agent_cams: - color, depth, _ = self.render_camera(config) - color_depth_obs['color'] += (color,) - color_depth_obs['depth'] += (depth,) - cmap, hmap = utils.get_fused_heightmap(color_depth_obs, self.agent_cams, - self.task.bounds, pix_size=0.003125) - obs['heightmap'] = (cmap, hmap) - return obs - diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/options/train_options.py b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/options/train_options.py deleted file mode 100644 index 583ea1423fdc9a649cd7044d74d554bf0ac2bf51..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/options/train_options.py +++ /dev/null @@ -1,84 +0,0 @@ -from argparse import ArgumentParser -from configs.paths_config import model_paths - - -class TrainOptions: - - def __init__(self): - self.parser = ArgumentParser() - self.initialize() - - def initialize(self): - self.parser.add_argument('--exp_dir', type=str, help='Path to experiment output directory') - self.parser.add_argument('--dataset_type', default='ffhq_encode', type=str, - help='Type of dataset/experiment to run') - self.parser.add_argument('--encoder_type', default='Encoder4Editing', type=str, help='Which encoder to use') - - self.parser.add_argument('--batch_size', default=4, type=int, help='Batch size for training') - self.parser.add_argument('--test_batch_size', default=2, type=int, help='Batch size for testing and inference') - self.parser.add_argument('--workers', default=4, type=int, help='Number of train dataloader workers') - self.parser.add_argument('--test_workers', default=2, type=int, - help='Number of test/inference dataloader workers') - - self.parser.add_argument('--learning_rate', default=0.0001, type=float, help='Optimizer learning rate') - self.parser.add_argument('--optim_name', default='ranger', type=str, help='Which optimizer to use') - self.parser.add_argument('--train_decoder', default=False, type=bool, help='Whether to train the decoder model') - self.parser.add_argument('--start_from_latent_avg', action='store_true', - help='Whether to add average latent vector to generate codes from encoder.') - self.parser.add_argument('--lpips_type', default='alex', type=str, help='LPIPS backbone') - - self.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor') - self.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor') - self.parser.add_argument('--l2_lambda', default=1.0, type=float, help='L2 loss multiplier factor') - - self.parser.add_argument('--stylegan_weights', default=model_paths['stylegan_ffhq'], type=str, - help='Path to StyleGAN model weights') - self.parser.add_argument('--stylegan_size', default=1024, type=int, - help='size of pretrained StyleGAN Generator') - self.parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to pSp model checkpoint') - - self.parser.add_argument('--max_steps', default=500000, type=int, help='Maximum number of training steps') - self.parser.add_argument('--image_interval', default=100, type=int, - help='Interval for logging train images during training') - self.parser.add_argument('--board_interval', default=50, type=int, - help='Interval for logging metrics 
to tensorboard') - self.parser.add_argument('--val_interval', default=1000, type=int, help='Validation interval') - self.parser.add_argument('--save_interval', default=None, type=int, help='Model checkpoint interval') - - # Discriminator flags - self.parser.add_argument('--w_discriminator_lambda', default=0, type=float, help='Dw loss multiplier') - self.parser.add_argument('--w_discriminator_lr', default=2e-5, type=float, help='Dw learning rate') - self.parser.add_argument("--r1", type=float, default=10, help="weight of the r1 regularization") - self.parser.add_argument("--d_reg_every", type=int, default=16, - help="interval for applying r1 regularization") - self.parser.add_argument('--use_w_pool', action='store_true', - help='Whether to store a latnet codes pool for the discriminator\'s training') - self.parser.add_argument("--w_pool_size", type=int, default=50, - help="W\'s pool size, depends on --use_w_pool") - - # e4e specific - self.parser.add_argument('--delta_norm', type=int, default=2, help="norm type of the deltas") - self.parser.add_argument('--delta_norm_lambda', type=float, default=2e-4, help="lambda for delta norm loss") - - # Progressive training - self.parser.add_argument('--progressive_steps', nargs='+', type=int, default=None, - help="The training steps of training new deltas. steps[i] starts the delta_i training") - self.parser.add_argument('--progressive_start', type=int, default=None, - help="The training step to start training the deltas, overrides progressive_steps") - self.parser.add_argument('--progressive_step_every', type=int, default=2_000, - help="Amount of training steps for each progressive step") - - # Save additional training info to enable future training continuation from produced checkpoints - self.parser.add_argument('--save_training_data', action='store_true', - help='Save intermediate training data to resume training from the checkpoint') - self.parser.add_argument('--sub_exp_dir', default=None, type=str, help='Name of sub experiment directory') - self.parser.add_argument('--keep_optimizer', action='store_true', - help='Whether to continue from the checkpoint\'s optimizer') - self.parser.add_argument('--resume_training_from_ckpt', default=None, type=str, - help='Path to training checkpoint, works when --save_training_data was set to True') - self.parser.add_argument('--update_param_list', nargs='+', type=str, default=None, - help="Name of training parameters to update the loaded training checkpoint") - - def parse(self): - opts = self.parser.parse_args() - return opts diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 5c5b94e5a27d7f902d4bdea7ef6c4ef0b816bb99..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/danet_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/danet_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/kld.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/kld.py deleted file mode 100644 index 
18260bf974bf47d8381223ac39be0c47c031bf8a..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/metrics/kld.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import contextlib -from functools import partial -import logging -import os -import typing as tp - -import torch -import torchmetrics - -from ..data.audio_utils import convert_audio - - -logger = logging.getLogger(__name__) - - -class _patch_passt_stft: - """Decorator to patch torch.stft in PaSST.""" - def __init__(self): - self.old_stft = torch.stft - - def __enter__(self): - # return_complex is a mandatory parameter in latest torch versions - # torch is throwing RuntimeErrors when not set - torch.stft = partial(torch.stft, return_complex=False) - - def __exit__(self, *exc): - torch.stft = self.old_stft - - -def kl_divergence(pred_probs: torch.Tensor, target_probs: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor: - """Computes the elementwise KL-Divergence loss between probability distributions - from generated samples and target samples. - - Args: - pred_probs (torch.Tensor): Probabilities for each label obtained - from a classifier on generated audio. Expected shape is [B, num_classes]. - target_probs (torch.Tensor): Probabilities for each label obtained - from a classifier on target audio. Expected shape is [B, num_classes]. - epsilon (float): Epsilon value. - Returns: - kld (torch.Tensor): KLD loss between each generated sample and target pair. - """ - kl_div = torch.nn.functional.kl_div((pred_probs + epsilon).log(), target_probs, reduction="none") - return kl_div.sum(-1) - - -class KLDivergenceMetric(torchmetrics.Metric): - """Base implementation for KL Divergence metric. - - The KL divergence is measured between probability distributions - of class predictions returned by a pre-trained audio classification model. - When the KL-divergence is low, the generated audio is expected to - have similar acoustic characteristics as the reference audio, - according to the classifier. - """ - def __init__(self): - super().__init__() - self.add_state("kld_pq_sum", default=torch.tensor(0.), dist_reduce_fx="sum") - self.add_state("kld_qp_sum", default=torch.tensor(0.), dist_reduce_fx="sum") - self.add_state("kld_all_sum", default=torch.tensor(0.), dist_reduce_fx="sum") - self.add_state("weight", default=torch.tensor(0), dist_reduce_fx="sum") - - def _get_label_distribution(self, x: torch.Tensor, sizes: torch.Tensor, - sample_rates: torch.Tensor) -> tp.Optional[torch.Tensor]: - """Get model output given provided input tensor. - - Args: - x (torch.Tensor): Input audio tensor of shape [B, C, T]. - sizes (torch.Tensor): Actual audio sample length, of shape [B]. - sample_rates (torch.Tensor): Actual audio sample rate, of shape [B]. - Returns: - probs (torch.Tensor): Probabilities over labels, of shape [B, num_classes]. - """ - raise NotImplementedError("implement method to extract label distributions from the model.") - - def update(self, preds: torch.Tensor, targets: torch.Tensor, - sizes: torch.Tensor, sample_rates: torch.Tensor) -> None: - """Calculates running KL-Divergence loss between batches of audio - preds (generated) and target (ground-truth) - Args: - preds (torch.Tensor): Audio samples to evaluate, of shape [B, C, T]. - targets (torch.Tensor): Target samples to compare against, of shape [B, C, T]. 
- sizes (torch.Tensor): Actual audio sample length, of shape [B]. - sample_rates (torch.Tensor): Actual audio sample rate, of shape [B]. - """ - assert preds.shape == targets.shape - assert preds.size(0) > 0, "Cannot update the loss with empty tensors" - preds_probs = self._get_label_distribution(preds, sizes, sample_rates) - targets_probs = self._get_label_distribution(targets, sizes, sample_rates) - if preds_probs is not None and targets_probs is not None: - assert preds_probs.shape == targets_probs.shape - kld_scores = kl_divergence(preds_probs, targets_probs) - assert not torch.isnan(kld_scores).any(), "kld_scores contains NaN value(s)!" - self.kld_pq_sum += torch.sum(kld_scores) - kld_qp_scores = kl_divergence(targets_probs, preds_probs) - self.kld_qp_sum += torch.sum(kld_qp_scores) - self.weight += torch.tensor(kld_scores.size(0)) - - def compute(self) -> dict: - """Computes KL-Divergence across all evaluated pred/target pairs.""" - weight: float = float(self.weight.item()) # type: ignore - assert weight > 0, "Unable to compute with total number of comparisons <= 0" - logger.info(f"Computing KL divergence on a total of {weight} samples") - kld_pq = self.kld_pq_sum.item() / weight # type: ignore - kld_qp = self.kld_qp_sum.item() / weight # type: ignore - kld_both = kld_pq + kld_qp - return {'kld': kld_pq, 'kld_pq': kld_pq, 'kld_qp': kld_qp, 'kld_both': kld_both} - - -class PasstKLDivergenceMetric(KLDivergenceMetric): - """KL-Divergence metric based on pre-trained PASST classifier on AudioSet. - - From: PaSST: Efficient Training of Audio Transformers with Patchout - Paper: https://arxiv.org/abs/2110.05069 - Implementation: https://github.com/kkoutini/PaSST - - Follow instructions from the github repo: - ``` - pip install 'git+https://github.com/kkoutini/passt_hear21@0.0.19#egg=hear21passt' - ``` - - Args: - pretrained_length (float, optional): Audio duration used for the pretrained model. 
- """ - def __init__(self, pretrained_length: tp.Optional[float] = None): - super().__init__() - self._initialize_model(pretrained_length) - - def _initialize_model(self, pretrained_length: tp.Optional[float] = None): - """Initialize underlying PaSST audio classifier.""" - model, sr, max_frames, min_frames = self._load_base_model(pretrained_length) - self.min_input_frames = min_frames - self.max_input_frames = max_frames - self.model_sample_rate = sr - self.model = model - self.model.eval() - self.model.to(self.device) - - def _load_base_model(self, pretrained_length: tp.Optional[float]): - """Load pretrained model from PaSST.""" - try: - if pretrained_length == 30: - from hear21passt.base30sec import get_basic_model # type: ignore - max_duration = 30 - elif pretrained_length == 20: - from hear21passt.base20sec import get_basic_model # type: ignore - max_duration = 20 - else: - from hear21passt.base import get_basic_model # type: ignore - # Original PASST was trained on AudioSet with 10s-long audio samples - max_duration = 10 - min_duration = 0.15 - min_duration = 0.15 - except ModuleNotFoundError: - raise ModuleNotFoundError( - "Please install hear21passt to compute KL divergence: ", - "pip install 'git+https://github.com/kkoutini/passt_hear21@0.0.19#egg=hear21passt'" - ) - model_sample_rate = 32_000 - max_input_frames = int(max_duration * model_sample_rate) - min_input_frames = int(min_duration * model_sample_rate) - with open(os.devnull, 'w') as f, contextlib.redirect_stdout(f): - model = get_basic_model(mode='logits') - return model, model_sample_rate, max_input_frames, min_input_frames - - def _process_audio(self, wav: torch.Tensor, sample_rate: int, wav_len: int) -> tp.Optional[torch.Tensor]: - wav = wav.unsqueeze(0) - wav = wav[..., :wav_len] - wav = convert_audio(wav, from_rate=sample_rate, to_rate=self.model_sample_rate, to_channels=1) - wav = wav.squeeze(0) - # create chunks of audio to match the classifier processing length - segments = torch.split(wav, self.max_input_frames, dim=-1) - valid_segments = [] - for s in segments: - if s.size(-1) > self.min_input_frames: - s = torch.nn.functional.pad(s, (0, self.max_input_frames - s.shape[-1])) - valid_segments.append(s) - if len(valid_segments) > 0: - return torch.stack(valid_segments, dim=0) - else: - return None - - def _get_label_distribution(self, x: torch.Tensor, sizes: torch.Tensor, - sample_rates: torch.Tensor) -> tp.Optional[torch.Tensor]: - """Get model output given provided input tensor. - - Args: - x (torch.Tensor): Input audio tensor of shape [B, C, T]. - sizes (torch.Tensor): Actual audio sample length, of shape [B]. - sample_rates (torch.Tensor): Actual audio sample rate, of shape [B]. - Returns: - probs (torch.Tensor, optional): Probabilities over labels, of shape [B, num_classes]. 
- """ - all_probs: tp.List[torch.Tensor] = [] - for i, wav in enumerate(x): - sample_rate = int(sample_rates[i].item()) - wav_len = int(sizes[i].item()) - wav = self._process_audio(wav, sample_rate, wav_len) - if wav is not None: - assert wav.dim() == 3, f"Unexpected number of dims for preprocessed wav: {wav.shape}" - wav = wav.mean(dim=1) - # PaSST is printing a lot of infos that we are not interested in - with open(os.devnull, 'w') as f, contextlib.redirect_stdout(f): - with torch.no_grad(), _patch_passt_stft(): - logits = self.model(wav.to(self.device)) - probs = torch.softmax(logits, dim=-1) - probs = probs.mean(dim=0) - all_probs.append(probs) - if len(all_probs) > 0: - return torch.stack(all_probs, dim=0) - else: - return None diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/docs/AUDIOGEN.md b/spaces/GrandaddyShmax/AudioCraft_Plus/docs/AUDIOGEN.md deleted file mode 100644 index a0ff481190fb52fe865aa66aaaa10176f7cf995c..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/docs/AUDIOGEN.md +++ /dev/null @@ -1,158 +0,0 @@ -# AudioGen: Textually-guided audio generation - -AudioCraft provides the code and a model re-implementing AudioGen, a [textually-guided audio generation][audiogen_arxiv] -model that performs text-to-sound generation. - -The provided AudioGen reimplementation follows the LM model architecture introduced in [MusicGen][musicgen_arxiv] -and is a single stage auto-regressive Transformer model trained over a 16kHz -EnCodec tokenizer with 4 codebooks sampled at 50 Hz. -This model variant reaches similar audio quality than the original implementation introduced in the AudioGen publication -while providing faster generation speed given the smaller frame rate. - -**Important note:** The provided models are NOT the original models used to report numbers in the -[AudioGen publication][audiogen_arxiv]. Refer to the model card to learn more about architectural changes. - -Listen to samples from the **original AudioGen implementation** in our [sample page][audiogen_samples]. - - -## Model Card - -See [the model card](../model_cards/AUDIOGEN_MODEL_CARD.md). - - -## Installation - -Please follow the AudioCraft installation instructions from the [README](../README.md). - -AudioCraft requires a GPU with at least 16 GB of memory for running inference with the medium-sized models (~1.5B parameters). - -## API and usage - -We provide a simple API and 1 pre-trained models for AudioGen: - -`facebook/audiogen-medium`: 1.5B model, text to sound - [🤗 Hub](https://huggingface.co/facebook/audiogen-medium) - -You can play with AudioGen by running the jupyter notebook at [`demos/audiogen_demo.ipynb`](../demos/audiogen_demo.ipynb) locally (if you have a GPU). - -See after a quick example for using the API. - -```python -import torchaudio -from audiocraft.models import AudioGen -from audiocraft.data.audio import audio_write - -model = AudioGen.get_pretrained('facebook/audiogen-medium') -model.set_generation_params(duration=5) # generate 5 seconds. -descriptions = ['dog barking', 'sirene of an emergency vehicle', 'footsteps in a corridor'] -wav = model.generate(descriptions) # generates 3 samples. - -for idx, one_wav in enumerate(wav): - # Will save under {idx}.wav, with loudness normalization at -14 db LUFS. 
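    # loudness_compressor=True additionally applies a soft limiter (tanh in audiocraft) so the normalized audio does not clip.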
- audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True) -``` - -## Training - -The [AudioGenSolver](../audiocraft/solvers/audiogen.py) implements the AudioGen's training pipeline -used to develop the released model. Note that this may not fully reproduce the results presented in the paper. -Similarly to MusicGen, it defines an autoregressive language modeling task over multiple streams of -discrete tokens extracted from a pre-trained EnCodec model (see [EnCodec documentation](./ENCODEC.md) -for more details on how to train such model) with dataset-specific changes for environmental sound -processing. - -Note that **we do NOT provide any of the datasets** used for training AudioGen. - -### Example configurations and grids - -We provide configurations to reproduce the released models and our research. -AudioGen solvers configuration are available in [config/solver/audiogen](../config/solver/audiogen). -The base training configuration used for the released models is the following: -[`solver=audiogen/audiogen_base_16khz`](../config/solver/audiogen/audiogen_base_16khz.yaml) - -Please find some example grids to train AudioGen at -[audiocraft/grids/audiogen](../audiocraft/grids/audiogen/). - -```shell -# text-to-sound -dora grid audiogen.audiogen_base_16khz -``` - -### Sound dataset and metadata - -AudioGen's underlying dataset is an AudioDataset augmented with description metadata. -The AudioGen dataset implementation expects the metadata to be available as `.json` files -at the same location as the audio files or through specified external folder. -Learn more in the [datasets section](./DATASETS.md). - -### Evaluation stage - -By default, evaluation stage is also computing the cross-entropy and the perplexity over the -evaluation dataset. Indeed the objective metrics used for evaluation can be costly to run -or require some extra dependencies. Please refer to the [metrics documentation](./METRICS.md) -for more details on the requirements for each metric. - -We provide an off-the-shelf configuration to enable running the objective metrics -for audio generation in -[config/solver/audiogen/evaluation/objective_eval](../config/solver/audiogen/evaluation/objective_eval.yaml). - -One can then activate evaluation the following way: -```shell -# using the configuration -dora run solver=audiogen/debug solver/audiogen/evaluation=objective_eval -# specifying each of the fields, e.g. to activate KL computation -dora run solver=audiogen/debug evaluate.metrics.kld=true -``` - -See [an example evaluation grid](../audiocraft/grids/audiogen/audiogen_pretrained_16khz_eval.py). - -### Generation stage - -The generation stage allows to generate samples conditionally and/or unconditionally and to perform -audio continuation (from a prompt). We currently support greedy sampling (argmax), sampling -from softmax with a given temperature, top-K and top-P (nucleus) sampling. The number of samples -generated and the batch size used are controlled by the `dataset.generate` configuration -while the other generation parameters are defined in `generate.lm`. - -```shell -# control sampling parameters -dora run solver=audiogen/debug generate.lm.gen_duration=5 generate.lm.use_sampling=true generate.lm.top_k=15 -``` - -## More information - -Refer to [MusicGen's instructions](./MUSICGEN.md). - -### Learn more - -Learn more about AudioCraft training pipelines in the [dedicated section](./TRAINING.md). 
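For readers who prefer the Python API over `dora`, below is a minimal sketch mirroring the sampling controls described in the generation stage. It assumes the `use_sampling`, `top_k`, `top_p` and `temperature` keyword arguments of `set_generation_params` available in recent audiocraft releases; names may differ in other versions.

```python
from audiocraft.models import AudioGen
from audiocraft.data.audio import audio_write

model = AudioGen.get_pretrained('facebook/audiogen-medium')
# Sampling controls analogous to the generate.lm.* options above.
model.set_generation_params(
    duration=5,         # seconds of audio per sample
    use_sampling=True,  # sample from the softmax instead of taking the argmax
    top_k=250,          # keep only the 250 most likely tokens at each step
    temperature=1.0,    # softmax temperature
)
wav = model.generate(['footsteps in a corridor'])
audio_write('footsteps', wav[0].cpu(), model.sample_rate,
            strategy="loudness", loudness_compressor=True)
```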
- - -## Citation - -AudioGen -``` -@article{kreuk2022audiogen, - title={Audiogen: Textually guided audio generation}, - author={Kreuk, Felix and Synnaeve, Gabriel and Polyak, Adam and Singer, Uriel and D{\'e}fossez, Alexandre and Copet, Jade and Parikh, Devi and Taigman, Yaniv and Adi, Yossi}, - journal={arXiv preprint arXiv:2209.15352}, - year={2022} -} -``` - -MusicGen -``` -@article{copet2023simple, - title={Simple and Controllable Music Generation}, - author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez}, - year={2023}, - journal={arXiv preprint arXiv:2306.05284}, -} -``` - -## License - -See license information in the [model card](../model_cards/AUDIOGEN_MODEL_CARD.md). - -[audiogen_arxiv]: https://arxiv.org/abs/2209.15352 -[musicgen_arxiv]: https://arxiv.org/abs/2306.05284 -[audiogen_samples]: https://felixkreuk.github.io/audiogen/ diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/readme.md b/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/readme.md deleted file mode 100644 index b90ce5a946acf55a6530b3c8d010a5ec2642f6ae..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/classification/readme.md +++ /dev/null @@ -1,23 +0,0 @@ -## 分类下游任务 - -在当前目录下,我们提供丰富的分类任务的示例,其中我们提供三个一键式运行的示例。 - -- demo_classification_afqmc_roberta.sh 使用DDP微调roberta -- demo_classification_afqmc_roberta_deepspeed.sh 结合deepspeed微调roberta,获得更快的运算速度 -- demo_classification_afqmc_erlangshen_offload.sh 仅需7G显存即可微调我们效果最好的二郎神系列模型 - -上述示例均采用AFQMC的数据集,关于数据集的介绍可以在[这里](https://www.cluebenchmarks.com/introduce.html)找到。 -同时我们处理过的数据文件已经放在Huggingface上,点击[这里](https://huggingface.co/datasets/IDEA-CCNL/AFQMC)直达源文件。 -仅需要按我们的格式稍微处理一下数据集,即可适配下游不同的分类任务。 -在脚本示例中,仅需要修改如下参数即可适配本地文件 -``` - --dataset_name IDEA-CCNL/AFQMC \ - --------> 修改为 - - --data_dir $DATA_DIR \ # 数据目录 - --train_data train.json \ # 数据文件 - --valid_data dev.json \ - --test_data test.json \ - -``` \ No newline at end of file diff --git a/spaces/Hanqix/oxford_pet_classify/README.md b/spaces/Hanqix/oxford_pet_classify/README.md deleted file mode 100644 index bdb815497bd70e207b8eecbce7c8e973e557bcd1..0000000000000000000000000000000000000000 --- a/spaces/Hanqix/oxford_pet_classify/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Oxford Pet Classify -emoji: 🏃 -colorFrom: pink -colorTo: blue -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/resnet.py b/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/resnet.py deleted file mode 100644 index 9ad8ee87de4bb579d745ab8302a368ca1749a1fe..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/models/ofa/resnet.py +++ /dev/null @@ -1,225 +0,0 @@ -import torch -import torch.nn as nn - - -def drop_path(x, drop_prob: float = 0., training: bool = False): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, - the original name is misleading as 'Drop Connect' is a.sh different form of dropout in a.sh separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... 
I've opted for - changing the layer and argument names to 'drop path' rather than mix DropConnect as a.sh layer name and use - 'survival rate' as the argument. - """ - if drop_prob == 0. or not training: - return x - keep_prob = 1 - drop_prob - shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets - random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) - random_tensor.floor_() # binarize - output = x.div(keep_prob) * random_tensor - return output - - -class DropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - """ - def __init__(self, drop_prob=None): - super(DropPath, self).__init__() - self.drop_prob = drop_prob - - def forward(self, x): - return drop_path(x, self.drop_prob, self.training) - - -def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=dilation, groups=groups, bias=False, dilation=dilation) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None): - super(BasicBlock, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - if groups != 1 or base_width != 64: - raise ValueError('BasicBlock only supports groups=1 and base_width=64') - if dilation > 1: - raise NotImplementedError("Dilation > 1 not supported in BasicBlock") - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - assert False - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) - # while original implementation places the stride at the first 1x1 convolution(self.conv1) - # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. - # This variant is also known as ResNet V1.5 and improves accuracy according to - # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 
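    # expansion = 4: the closing 1x1 conv (conv3 below) widens the bottleneck width back to planes * 4 output channels.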
- - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, - base_width=64, dilation=1, norm_layer=None, drop_path_rate=0.0): - super(Bottleneck, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - width = int(planes * (base_width / 64.)) * groups - # Both self.conv2 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv1x1(inplanes, width) - self.bn1 = norm_layer(width) - self.conv2 = conv3x3(width, width, stride, groups, dilation) - self.bn2 = norm_layer(width) - self.conv3 = conv1x1(width, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out = identity + self.drop_path(out) - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, layers, zero_init_residual=False, - groups=1, width_per_group=64, replace_stride_with_dilation=None, - norm_layer=None, drop_path_rate=0.0): - super(ResNet, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self._norm_layer = norm_layer - - self.inplanes = 64 - self.dilation = 1 - if replace_stride_with_dilation is None: - # each element in the tuple indicates if we should replace - # the 2x2 stride with a dilated convolution instead - replace_stride_with_dilation = [False, False, False] - if len(replace_stride_with_dilation) != 3: - raise ValueError("replace_stride_with_dilation should be None " - "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) - self.groups = groups - self.base_width = width_per_group - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = norm_layer(self.inplanes) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(Bottleneck, 64, layers[0], drop_path_rate=drop_path_rate) - self.layer2 = self._make_layer(Bottleneck, 128, layers[1], stride=2, - dilate=replace_stride_with_dilation[0], drop_path_rate=drop_path_rate) - self.layer3 = self._make_layer(Bottleneck, 256, layers[2], stride=2, - dilate=replace_stride_with_dilation[1], drop_path_rate=drop_path_rate) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.SyncBatchNorm, nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
- # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, dilate=False, drop_path_rate=0.0): - norm_layer = self._norm_layer - downsample = None - previous_dilation = self.dilation - if dilate: - self.dilation *= stride - stride = 1 - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, self.groups, - self.base_width, previous_dilation, norm_layer)) - self.inplanes = planes * block.expansion - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, blocks)] - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, groups=self.groups, - base_width=self.base_width, dilation=self.dilation, - norm_layer=norm_layer, drop_path_rate=dpr[i])) - - return nn.Sequential(*layers) - - def _forward_impl(self, x): - # See note [TorchScript super()] - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - - return x - - def forward(self, x): - return self._forward_impl(x) \ No newline at end of file diff --git a/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/evaluate_caption.sh b/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/evaluate_caption.sh deleted file mode 100644 index f80d26b4beb53a40e2ea2aad74c5f00ee5078c6e..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/run_scripts/caption/evaluate_caption.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -# The port for communication. Note that if you want to run multiple tasks on the same machine, -# you need to specify different port numbers. 
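# e.g. export MASTER_PORT=1082 for a second concurrent run (port value is illustrative; any free port works).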
-export MASTER_PORT=1081 -export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 -export GPUS_PER_NODE=8 - -user_dir=../../ofa_module -bpe_dir=../../utils/BPE - -data=../../dataset/caption_data/caption_test.tsv -path=../../checkpoints/caption_large_best_clean.pt -result_path=../../results/caption -selected_cols=1,4,2 -split='test' - -python3 -m torch.distributed.launch --nproc_per_node=${GPUS_PER_NODE} --master_port=${MASTER_PORT} ../../evaluate.py \ - ${data} \ - --path=${path} \ - --user-dir=${user_dir} \ - --task=caption \ - --batch-size=16 \ - --log-format=simple --log-interval=10 \ - --seed=7 \ - --gen-subset=${split} \ - --results-path=${result_path} \ - --beam=5 \ - --max-len-b=16 \ - --no-repeat-ngram-size=3 \ - --fp16 \ - --num-workers=0 \ - --model-overrides="{\"data\":\"${data}\",\"bpe_dir\":\"${bpe_dir}\",\"eval_cider\":False,\"selected_cols\":\"${selected_cols}\"}" - -python coco_eval.py ../../results/caption/test_predict.json ../../dataset/caption_data/test_caption_coco_format.json diff --git a/spaces/Harsh23Kashyap/StockMarketPredictor/app.py b/spaces/Harsh23Kashyap/StockMarketPredictor/app.py deleted file mode 100644 index 9cc55daa0ff96f612ec6e238d14b271a863d87b0..0000000000000000000000000000000000000000 --- a/spaces/Harsh23Kashyap/StockMarketPredictor/app.py +++ /dev/null @@ -1,300 +0,0 @@ -import pandas as pd -import numpy as np -import matplotlib.pyplot as plt -import pandas_datareader as data -import plotly.express as px -import streamlit as st -import requests -from streamlit_lottie import st_lottie -from keras.models import load_model -from sklearn.preprocessing import MinMaxScaler -from tensorflow.python import tf2 -from pandas_datareader import data as pdr -import yfinance as yf -yf.pdr_override() - - -st.set_page_config( - page_title=" Stocks Market Predictor", #The page title, shown in the browser tab.(should be Placement Details) - initial_sidebar_state="auto", #The way sidebar should start out. Auto shows it in desktop. - page_icon=":computer:", #The page favicon. Use the computer emoji - layout="wide", #The way page content should be laid out. "wide" uses the entire screen. - menu_items={ #Configure the menu that appears on the top-right side of this app. - 'About': 'https://www.linkedin.com/in/harsh-kashyap-79b87b193/', #A markdown string to show in the About dialog. Used my linkedIn id - } -) -def load_lottieurl(url: str): - r = requests.get(url) #Make a request to a web page, and return the status code: - if r.status_code != 200: #200 is the HTTP status code for "OK", a successful response. 
- return None - return r.json() #return the animated gif - -from datetime import date -from datetime import timedelta -today = date.today() -import datetime -now = datetime.datetime.now() -weekend=today.strftime("%A") -if(weekend=="Saturday"): - today=today-timedelta(days=1) -if(weekend=="Sunday"): - today=today-timedelta(days=2) -# Yesterday date -yesterday = today - timedelta(days = 1) -start='2010-01-01' -end=yesterday - - -if(today.isoweekday()==1): - current = yesterday = today - timedelta(days = 2) -else: - current = yesterday = today - timedelta(days = 1) - - - -st.title(":computer: Stock Market Predictor") #Title heading of the page -st.markdown("##") - -with st.sidebar: - st.title("World Market") - st.title("NIFTY") - nifval=True - try: - nif = pdr.get_data_yahoo('^NSEI', start=current - timedelta(7), end=current)['Close'].tail(1) - except: - nifval=False - if nifval==True: - st.header(nif.iloc[0].round(2)) - else: - st.write("Can't load right now") - st.markdown("""---""") - - st.title("SENSEX") - senval=True - try: - sen = pdr.get_data_yahoo('^BSESN', start=current - timedelta(7), end=current)['Close'].tail(1) - except: - senval=False - if senval==True: - st.header(sen.iloc[0].round(2)) - else: - st.write("Can't load right now") - - st.markdown("""---""") - - st.title("S&P FUTURES") - spval=True - try: - sp = pdr.get_data_yahoo('ES=F', start=current - timedelta(7), end=current)['Close'].tail(1) - except: - spval=False - if spval==True: - st.header(sp.iloc[0].round(2)) - else: - st.write("Can't load right now") - st.markdown("""---""") - - st.title("GOLD") - goldval=True - try: - gold = pdr.get_data_yahoo('GC=F', start=current - timedelta(7), end=current)['Close'].tail(1) - except: - goldval=False - if goldval==True: - st.header(gold.iloc[0].round(2)) - else: - st.write("Can't load right now") - st.markdown("""---""") - - st.title("DOW") - dowval=True - try: - dow = pdr.get_data_yahoo('YM=F', start=current - timedelta(7), end=current)['Close'].tail(1) - except: - dowval=False - if dowval==True: - st.header(dow.iloc[0].round(2)) - else: - st.write("Can't load right now") - - st.markdown("""---""") - st.title("NASDAQ") - nasval=True - try: - nas = pdr.get_data_yahoo('NQ=F', start=current - timedelta(7), end=current)['Close'].tail(1) - except: - nasval=False - if nasval==True: - st.header(nas.iloc[0].round(2)) - else: - st.write("Can't load right now") - st.markdown("""---""") - - st.title("CRUDE OIL") - oilval=True - try: - oil = pdr.get_data_yahoo('CL=F', start=current - timedelta(7), end=current)['Close'].tail(1) - except: - oilval=False - if oilval==True: - st.header(oil.iloc[0].round(2)) - else: - st.write("Can't load right now") - st.markdown("""---""") - - - - - -st.subheader("Enter Stock Ticker") -user_input=st.text_input('','HDFCBANK.BO') - -val=True -try: - df = pdr.get_data_yahoo(user_input, start=start, end=end) -except: - val=False - st.write("Wrong ticker. 
Select again") - st.markdown("""---""") - error = load_lottieurl("https://assets9.lottiefiles.com/packages/lf20_k1rx9jox.json") #get the animated gif from file - st_lottie(error, key="Dashboard1", height=400) #change the size to height 400 - -if val==True: - date=df.index - - st.header("Recent Price") - curr = pdr.get_data_yahoo(user_input, start=current - timedelta(7), end=current)['Close'].tail(1) - st.header(curr.iloc[0].round(2)) - st.markdown("""---""") - - left_column, right_column = st.columns(2) #Columns divided into two parts - with left_column: - dashboard1 = load_lottieurl("https://assets10.lottiefiles.com/packages/lf20_kuhijlvx.json") #get the animated gif from file - st_lottie(dashboard1, key="Dashboard1", height=400) #change the size to height 400 - with right_column: - dashboard2 = load_lottieurl("https://assets10.lottiefiles.com/packages/lf20_i2eyukor.json") #get the animated gif from file - st_lottie(dashboard2, key="Dashboard2", height=400) #change the size to height 400 - - - st.markdown("""---""") - - #Describing data - st.subheader('Data from 2008 to '+str(end.year)) - st.write(df.describe()) - - st.markdown("""---""") - #Visualisations - st.subheader("Closing Price vs Time Chart of "+str(user_input)) #Header - #plot a line graph - fig_line = px.line( - df, - x = df.index, - y = "Close", - width=1400, #width of the chart - height=750, #height of the chart - ) - #remove the background of the back label - fig_line.update_layout( - plot_bgcolor="rgba(0,0,0,0)", #rgba means transparent - xaxis=(dict(showgrid=False)) #dont show the grid - ) - #plot the chart - st.plotly_chart(fig_line, use_container_width=True) - st.markdown("""---""") - - - st.subheader("Closing Price vs Time with 100MA of "+str(user_input)) #Header - ma100=df.Close.rolling(100).mean() - #plot a line graph - fig_line = px.line( - ma100, - x = df.index, - y = ma100, - width=1400, #width of the chart - height=750, #height of the chart - ) - #remove the background of the back label - fig_line.update_layout( - plot_bgcolor="rgba(0,0,0,0)", #rgba means transparent - xaxis=(dict(showgrid=False)) #dont show the grid - ) - #plot the chart - st.plotly_chart(fig_line, use_container_width=True) - st.markdown("""---""") - - - st.subheader("Closing Price vs Time with 1 year moving average of "+str(user_input)) #Header - ma365=df.Close.rolling(365).mean() - #plot a line graph - fig_line = px.line( - ma365, - x = df.index, - y = ma365, - width=1400, #width of the chart - height=750, #height of the chart - ) - #remove the background of the back label - fig_line.update_layout( - plot_bgcolor="rgba(0,0,0,0)", #rgba means transparent - xaxis=(dict(showgrid=False)) #dont show the grid - ) - #plot the chart - st.plotly_chart(fig_line, use_container_width=True) - st.markdown("""---""") - - - - #Splitting data into training and testing - - data_training= pd.DataFrame(df['Close'][0:int(len(df)*0.7)]) - data_testing= pd.DataFrame(df['Close'][int(len(df)*0.7):int(len(df))]) - ydate= date[int(len(df)*0.7):int(len(df))] - - #normalising data - - scaler=MinMaxScaler(feature_range=(0,1)) - - dataset_train = scaler.fit_transform(data_training) - dataset_test = scaler.transform(data_testing) - - def create_dataset(df): - x = [] - y = [] - for i in range(50, df.shape[0]): - x.append(df[i-50:i, 0]) - y.append(df[i, 0]) - x = np.array(x) - y = np.array(y) - return x,y - - #Creating dataset - x_train, y_train = create_dataset(dataset_train) - x_test, y_test = create_dataset(dataset_test) - - x_train = np.reshape(x_train, (x_train.shape[0], 
x_train.shape[1], 1)) - x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1)) - - #Load my model - model=load_model('stock_prediction.h5') - - predictions = model.predict(x_test) - predictions = scaler.inverse_transform(predictions) - y_test_scaled = scaler.inverse_transform(y_test.reshape(-1, 1)) - - cydate=ydate[50:] - st.markdown("""---""") - st.subheader("Actual Vs Predicted Price Graph for "+user_input) - fig, ax = plt.subplots(figsize=(16,8)) - ax.set_facecolor('#000041') - ax.plot(cydate,y_test_scaled, color='red', label='Original price') - plt.plot(cydate,predictions, color='cyan', label='Predicted price') - plt.xlabel("Date") - plt.ylabel("Price") - plt.title("Stocks for the company "+str(user_input)) - plt.legend() - st.pyplot(fig) - - - - -st.markdown("

    Made with ❤️ by Harsh Kashyap
    ", unsafe_allow_html=True) \ No newline at end of file diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/README.md b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/README.md deleted file mode 100644 index 02892bc9dd4344e550596d238e2b71870cfc7dd3..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/README.md +++ /dev/null @@ -1,220 +0,0 @@ -# vakyansh-tts -Text to Speech for Indic languages - -## 1. Installation and Setup for training - -Clone repo -Note : for multspeaker glow-tts training use branch [multispeaker](https://github.com/Open-Speech-EkStep/vakyansh-tts/tree/multispeaker) -``` -git clone https://github.com/Open-Speech-EkStep/vakyansh-tts -``` -Build conda virtual environment -``` -cd ./vakyansh-tts -conda create --name python=3.7 -conda activate -pip install -r requirements.txt -``` -Install [apex](https://github.com/NVIDIA/apex); commit: 37cdaf4 for Mixed-precision training - -Note : used only for glow-tts -``` -cd .. -git clone https://github.com/NVIDIA/apex -cd apex -git checkout 37cdaf4 -pip install -v --disable-pip-version-check --no-cache-dir ./ -cd ../vakyansh-tts -``` -Build Monotonic Alignment Search Code (Cython) - -Note : used only for glow-tts -``` -bash install.sh -``` - -## 2. Data Resampling - -The data format should have a folder containing all the .wav files for glow-tts and a text file containing filenames with their sentences. - -Directory structure: - -langauge_folder_name -``` -language_folder_name -|-- ./wav/*.wav -|-- ./text_file_name.txt -``` -The format for text_file_name.txt (Text file is only needed for glow-tts training) - -``` -( audio1.wav "Sentence1." ) -( audio2.wav "Sentence2." ) -``` - -To resample the .wav files to 22050 sample rate, change the following parameters in the vakyansh-tts/scripts/data/resample.sh - -``` -input_wav_path : absolute path to wav file folder in vakyansh_tts/data/ -output_wav_path : absolute path to vakyansh_tts/data/resampled_wav_folder_name -output_sample_rate : 22050 (or any other desired sample rate) -``` - -To run: -```bash -cd scripts/data/ -bash resample.sh -``` - - -## 3. Spectogram Training (glow-tts) - -### 3.1 Data Preparation - - -To prepare the data edit the vakyansh-tts/scripts/glow/prepare_data.sh file and change the following parameters -``` -input_text_path : absolute path to vakyansh_tts/data/text_file_name.txt -input_wav_path : absolute path to vakyansh_tts/data/resampled_wav_folder_name -gender : female or male voice -``` -To run: -```bash -cd scripts/glow/ -bash prepare_data.sh -``` -### 3.2 Training glow-tts - -To start the spectogram-training edit the vakyansh-tts/scripts/glow/train_glow.sh file and change the following parameter: -``` -gender : female or male voice -``` -Make sure that the gender is same as that of the prepare_data.sh file - -To start the training, run: -```bash -cd scripts/glow/ -bash train_glow.sh -``` -## 4. 
Vocoder Training (hifi-gan) - -### 4.1 Data Preparation - -To prepare the data edit the vakyansh-tts/scripts/hifi/prepare_data.sh file and change the following parameters -``` -input_wav_path : absolute path to vakyansh_tts/data/resampled_wav_folder_name -gender : female or male voice -``` -To run: -```bash -cd scripts/hifi/ -bash prepare_data.sh -``` -### 4.2 Training hifi-gan - -To start the spectogram-training edit the vakyansh-tts/scripts/hifi/train_hifi.sh file and change the following parameter: -``` -gender : female or male voice -``` -Make sure that the gender is same as that of the prepare_data.sh file - -To start the training, run: -```bash -cd scripts/hifi/ -bash train_hifi.sh -``` - -## 5. Inference - -### 5.1 Using Gradio - -To use the gradio link edit the following parameters in the vakyansh-tts/scripts/inference/gradio.sh file: -``` -gender : female or male voice -device : cpu or cuda -lang : langauge code -``` - -To run: -```bash -cd scripts/inference/ -bash gradio.sh -``` -### 5.2 Using fast API -To use the fast api link edit the parameters in the vakyansh-tts/scripts/inference/api.sh file similar to section 5.1 - -To run: -```bash -cd scripts/inference/ -bash api.sh -``` - -### 5.3 Direct Inference using text -To infer, edit the parameters in the vakyansh-tts/scripts/inference/infer.sh file similar to section 5.1 and set the text to the text variable - -To run: -```bash -cd scripts/inference/ -bash infer.sh -``` - -To configure other parameters there is a version that runs the advanced inference as well. Additional Parameters: -``` -noise_scale : can vary from 0 to 1 for noise factor -length_scale : can vary from 0 to 2 for changing the speed of the generated audio -transliteration : whether to switch on/off transliteration. 1: ON, 0: OFF -number_conversion : whether to switch on/off number to words conversion. 1: ON, 0: OFF -split_sentences : whether to switch on/off splitting of sentences. 1: ON, 0: OFF -``` -To run: -``` -cd scripts/inference/ -bash advanced_infer.sh -``` - -### 5.4 Installation of tts_infer package - -In tts_infer package, we currently have two components: - - 1. Transliteration (AI4bharat's open sourced models) (Languages supported: {'hi', 'gu', 'mr', 'bn', 'te', 'ta', 'kn', 'pa', 'gom', 'mai', 'ml', 'sd', 'si', 'ur'} ) - - 2. Num to Word (Languages supported: {'en', 'hi', 'gu', 'mr', 'bn', 'te', 'ta', 'kn', 'or', 'pa'} ) -``` -git clone https://github.com/Open-Speech-EkStep/vakyansh-tts -cd vakyansh-tts -bash install.sh -python setup.py bdist_wheel -pip install -e . -cd tts_infer -gsutil -m cp -r gs://vakyaansh-open-models/translit_models . 
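# gsutil ships with the Google Cloud SDK; install that first if the command is not found.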
-``` - -Usage: Refer to example file in tts_infer/ -``` -from tts_infer.tts import TextToMel, MelToWav -from tts_infer.transliterate import XlitEngine -from tts_infer.num_to_word_on_sent import normalize_nums - -import re -from scipy.io.wavfile import write - -text_to_mel = TextToMel(glow_model_dir='/path/to/glow-tts/checkpoint/dir', device='cuda') -mel_to_wav = MelToWav(hifi_model_dir='/path/to/hifi/checkpoint/dir', device='cuda') - -def translit(text, lang): - reg = re.compile(r'[a-zA-Z]') - engine = XlitEngine(lang) - words = [engine.translit_word(word, topk=1)[lang][0] if reg.match(word) else word for word in text.split()] - updated_sent = ' '.join(words) - return updated_sent - -def run_tts(text, lang): - text = text.replace('।', '.') # only for hindi models - text_num_to_word = normalize_nums(text, lang) # converting numbers to words in lang - text_num_to_word_and_transliterated = translit(text_num_to_word, lang) # transliterating english words to lang - - mel = text_to_mel.generate_mel(text_num_to_word_and_transliterated) - audio, sr = mel_to_wav.generate_wav(mel) - write(filename='temp.wav', rate=sr, data=audio) # for saving wav file, if needed - return (sr, audio) -``` diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/hifi/env.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/hifi/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/hifi/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/components.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/components.py deleted file mode 100644 index ae41c535846c530643965cf969bd438a54295469..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/components.py +++ /dev/null @@ -1,5068 +0,0 @@ -"""Contains all of the components that can be used with Gradio Interface / Blocks. -Along with the docs for each component, you can find the names of example demos that use -each component. 
These demos are located in the `demo` directory.""" - -from __future__ import annotations - -import inspect -import json -import math -import operator -import random -import tempfile -import uuid -import warnings -from copy import deepcopy -from enum import Enum -from pathlib import Path -from types import ModuleType -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type - -import altair as alt -import matplotlib.figure -import numpy as np -import pandas as pd -import PIL -import PIL.ImageOps -from ffmpy import FFmpeg -from markdown_it import MarkdownIt -from mdit_py_plugins.dollarmath.index import dollarmath_plugin -from pandas.api.types import is_numeric_dtype -from PIL import Image as _Image # using _ to minimize namespace pollution -from typing_extensions import Literal - -from gradio import media_data, processing_utils, utils -from gradio.blocks import Block, BlockContext -from gradio.context import Context -from gradio.documentation import document, set_documentation_group -from gradio.events import ( - Blurrable, - Changeable, - Clearable, - Clickable, - Editable, - Playable, - Streamable, - Submittable, - Uploadable, -) -from gradio.layouts import Column, Form, Row -from gradio.processing_utils import TempFileManager -from gradio.serializing import ( - FileSerializable, - ImgSerializable, - JSONSerializable, - Serializable, - SimpleSerializable, -) - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - - -set_documentation_group("component") -_Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843 - - -class _Keywords(Enum): - NO_VALUE = "NO_VALUE" # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()` - FINISHED_ITERATING = "FINISHED_ITERATING" # Used to skip processing of a component's value (needed for generators + state) - - -class Component(Block): - """ - A base class for defining the methods that all gradio components should have. - """ - - def __str__(self): - return self.__repr__() - - def __repr__(self): - return f"{self.get_block_name()}" - - def get_config(self): - """ - :return: a dictionary with context variables for the javascript file associated with the context - """ - return { - "name": self.get_block_name(), - **super().get_config(), - } - - def preprocess(self, x: Any) -> Any: - """ - Any preprocessing needed to be performed on function input. - """ - return x - - def postprocess(self, y): - """ - Any postprocessing needed to be performed on function output. - """ - return y - - def style( - self, - *, - container: bool | None = None, - **kwargs, - ): - """ - This method can be used to change the appearance of the component. - Parameters: - container: If True, will place the component in a container - providing some extra padding around the border. - """ - put_deprecated_params_in_box = False - if "rounded" in kwargs: - warnings.warn( - "'rounded' styling is no longer supported. To round adjacent components together, place them in a Column(variant='box')." - ) - if isinstance(kwargs["rounded"], list) or isinstance( - kwargs["rounded"], tuple - ): - put_deprecated_params_in_box = True - kwargs.pop("rounded") - if "margin" in kwargs: - warnings.warn( - "'margin' styling is no longer supported. To place adjacent components together without margin, place them in a Column(variant='box')." 
- ) - if isinstance(kwargs["margin"], list) or isinstance( - kwargs["margin"], tuple - ): - put_deprecated_params_in_box = True - kwargs.pop("margin") - if "border" in kwargs: - warnings.warn( - "'border' styling is no longer supported. To place adjacent components in a shared border, place them in a Column(variant='box')." - ) - kwargs.pop("border") - if container is not None: - self._style["container"] = container - if len(kwargs): - for key in kwargs: - warnings.warn(f"Unknown style parameter: {key}") - if put_deprecated_params_in_box and isinstance(self.parent, (Row, Column)): - if self.parent.variant == "default": - self.parent.variant = "compact" - return self - - -class IOComponent(Component, Serializable): - """ - A base class for defining methods that all input/output components should have. - """ - - def __init__( - self, - *, - value: Any = None, - label: str | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - load_fn: Callable | None = None, - every: float | None = None, - **kwargs, - ): - super().__init__(elem_id=elem_id, visible=visible, **kwargs) - - self.label = label - self.show_label = show_label - self.interactive = interactive - - self.load_event = None - self.load_event_to_attach = None - load_fn, initial_value = self.get_load_fn_and_initial_value(value) - self.value = ( - initial_value - if self._skip_init_processing - else self.postprocess(initial_value) - ) - if callable(load_fn): - self.load_event = self.attach_load_event(load_fn, every) - - self.set_interpret_parameters() - - def get_config(self): - return { - "label": self.label, - "show_label": self.show_label, - "interactive": self.interactive, - **super().get_config(), - } - - def set_interpret_parameters(self): - """ - Set any parameters for interpretation. - """ - return self - - def get_interpretation_neighbors(self, x: Any) -> Tuple[List, Dict, bool]: - """ - Generates values similar to input to be used to interpret the significance of the input in the final output. - Parameters: - x: Input to interface - Returns: (neighbor_values, interpret_kwargs, interpret_by_removal) - neighbor_values: Neighboring values to input x to compute for interpretation - interpret_kwargs: Keyword arguments to be passed to get_interpretation_scores - interpret_by_removal: If True, returned neighbors are values where the interpreted subsection was removed. If False, returned neighbors are values where the interpreted subsection was modified to a different value. - """ - return [], {}, True - - def get_interpretation_scores( - self, x: Any, neighbors: List[Any], scores: List[float], **kwargs - ) -> List: - """ - Arrange the output values from the neighbors into interpretation scores for the interface to render. - Parameters: - x: Input to interface - neighbors: Neighboring values to input x used for interpretation. - scores: Output value corresponding to each neighbor in neighbors - Returns: - Arrangement of interpretation scores for interfaces to render. - """ - return [] - - def generate_sample(self) -> Any: - """ - Returns a sample value of the input that would be accepted by the api. Used for api documentation. 
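For illustration only: the IOComponent constructor above turns a callable `value` into a load event via get_load_fn_and_initial_value and attach_load_event. A minimal sketch of that auto-refresh pattern, assuming `import gradio as gr`; the lambda, label, and 5-second interval are made up, and (as the docstrings note) queuing must be enabled for `every` to have an effect:

```python
import random

import gradio as gr

# Because `value` is callable, the component registers a load event that
# re-runs it on app load and every 5 seconds while the client is connected.
with gr.Blocks() as demo:
    gr.Number(value=lambda: random.randint(1, 6), every=5, label="Dice roll")

demo.queue()
# demo.launch()
```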
- """ - pass - - @staticmethod - def add_interactive_to_config(config, interactive): - if interactive is not None: - config["mode"] = "dynamic" if interactive else "static" - return config - - @staticmethod - def get_load_fn_and_initial_value(value): - if callable(value): - initial_value = value() - load_fn = value - else: - initial_value = value - load_fn = None - return load_fn, initial_value - - def attach_load_event(self, callable: Callable, every: float | None): - """Add a load event that runs `callable`, optionally every `every` seconds.""" - if Context.root_block: - return Context.root_block.load( - callable, - None, - self, - no_target=True, - every=every, - ) - else: - self.load_event_to_attach = (callable, every) - - def as_example(self, input_data): - """Return the input data in a way that can be displayed by the examples dataset component in the front-end.""" - return input_data - - -class FormComponent: - def get_expected_parent(self) -> Type[Form]: - return Form - - -@document("change", "submit", "blur", "style") -class Textbox( - FormComponent, Changeable, Submittable, Blurrable, IOComponent, SimpleSerializable -): - """ - Creates a textarea for user to enter string input or display string output. - Preprocessing: passes textarea value as a {str} into the function. - Postprocessing: expects a {str} returned from function and sets textarea value to it. - Examples-format: a {str} representing the textbox input. - - Demos: hello_world, diff_texts, sentence_builder - Guides: creating_a_chatbot, real_time_speech_recognition - """ - - def __init__( - self, - value: str | Callable | None = "", - *, - lines: int = 1, - max_lines: int = 20, - placeholder: str | None = None, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - type: str = "text", - **kwargs, - ): - """ - Parameters: - value: default text to provide in textarea. If callable, the function will be called whenever the app loads to set the initial value of the component. - lines: minimum number of line rows to provide in textarea. - max_lines: maximum number of line rows to provide in textarea. - placeholder: placeholder hint to provide behind textarea. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, will be rendered as an editable textbox; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - type: The type of textbox. One of: 'text', 'password', 'email', Default is 'text'. 
- """ - if type not in ["text", "password", "email"]: - raise ValueError('`type` must be one of "text", "password", or "email".') - - # - self.lines = lines - self.max_lines = max_lines if type == "text" else 1 - self.placeholder = placeholder - self.interpret_by_tokens = True - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - self.cleared_value = "" - self.test_input = value - self.type = type - - def get_config(self): - return { - "lines": self.lines, - "max_lines": self.max_lines, - "placeholder": self.placeholder, - "value": self.value, - "type": self.type, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - lines: int | None = None, - max_lines: int | None = None, - placeholder: str | None = None, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - interactive: bool | None = None, - type: str | None = None, - ): - updated_config = { - "lines": lines, - "max_lines": max_lines, - "placeholder": placeholder, - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "type": type, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def generate_sample(self) -> str: - return "Hello World" - - def preprocess(self, x: str | None) -> str | None: - """ - Preprocesses input (converts it to a string) before passing it to the function. - Parameters: - x: text - Returns: - text - """ - return None if x is None else str(x) - - def postprocess(self, y: str | None) -> str | None: - """ - Postproccess the function output y by converting it to a str before passing it to the frontend. - Parameters: - y: function output to postprocess. - Returns: - text - """ - return None if y is None else str(y) - - def set_interpret_parameters( - self, separator: str = " ", replacement: str | None = None - ): - """ - Calculates interpretation score of characters in input by splitting input into tokens, then using a "leave one out" method to calculate the score of each token by removing each token and measuring the delta of the output value. - Parameters: - separator: Separator to use to split input into tokens. - replacement: In the "leave one out" step, the text that the token should be replaced with. If None, the token is removed altogether. 
- """ - self.interpretation_separator = separator - self.interpretation_replacement = replacement - return self - - def tokenize(self, x: str) -> Tuple[List[str], List[str], None]: - """ - Tokenizes an input string by dividing into "words" delimited by self.interpretation_separator - """ - tokens = x.split(self.interpretation_separator) - leave_one_out_strings = [] - for index in range(len(tokens)): - leave_one_out_set = list(tokens) - if self.interpretation_replacement is None: - leave_one_out_set.pop(index) - else: - leave_one_out_set[index] = self.interpretation_replacement - leave_one_out_strings.append( - self.interpretation_separator.join(leave_one_out_set) - ) - return tokens, leave_one_out_strings, None - - def get_masked_inputs( - self, tokens: List[str], binary_mask_matrix: List[List[int]] - ) -> List[str]: - """ - Constructs partially-masked sentences for SHAP interpretation - """ - masked_inputs = [] - for binary_mask_vector in binary_mask_matrix: - masked_input = np.array(tokens)[np.array(binary_mask_vector, dtype=bool)] - masked_inputs.append(self.interpretation_separator.join(masked_input)) - return masked_inputs - - def get_interpretation_scores( - self, x, neighbors, scores: List[float], tokens: List[str], masks=None, **kwargs - ) -> List[Tuple[str, float]]: - """ - Returns: - Each tuple set represents a set of characters and their corresponding interpretation score. - """ - result = [] - for token, score in zip(tokens, scores): - result.append((token, score)) - result.append((self.interpretation_separator, 0)) - return result - - -@document("change", "submit", "style") -class Number( - FormComponent, Changeable, Submittable, Blurrable, IOComponent, SimpleSerializable -): - """ - Creates a numeric field for user to enter numbers as input or display numeric output. - Preprocessing: passes field value as a {float} or {int} into the function, depending on `precision`. - Postprocessing: expects an {int} or {float} returned from the function and sets field value to it. - Examples-format: a {float} or {int} representing the number's value. - - Demos: tax_calculator, titanic_survival, blocks_simple_squares - """ - - def __init__( - self, - value: float | Callable | None = None, - *, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - precision: int | None = None, - **kwargs, - ): - """ - Parameters: - value: default value. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, will be editable; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - precision: Precision to round input/output to. If set to 0, will round to nearest integer and covert type to int. If None, no rounding happens. 
- """ - self.precision = precision - self.interpret_by_tokens = False - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - self.test_input = self.value if self.value is not None else 1 - - @staticmethod - def _round_to_precision(num: float | int, precision: int | None) -> float | int: - """ - Round to a given precision. - - If precision is None, no rounding happens. If 0, num is converted to int. - - Parameters: - num: Number to round. - precision: Precision to round to. - Returns: - rounded number - """ - if precision is None: - return float(num) - elif precision == 0: - return int(round(num, precision)) - else: - return round(num, precision) - - def get_config(self): - return { - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: float | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def preprocess(self, x: float | None) -> float | None: - """ - Parameters: - x: numeric input - Returns: - number representing function input - """ - if x is None: - return None - return self._round_to_precision(x, self.precision) - - def postprocess(self, y: float | None) -> float | None: - """ - Any postprocessing needed to be performed on function output. - - Parameters: - y: numeric output - Returns: - number representing function output - """ - if y is None: - return None - return self._round_to_precision(y, self.precision) - - def set_interpret_parameters( - self, steps: int = 3, delta: float = 1, delta_type: str = "percent" - ): - """ - Calculates interpretation scores of numeric values close to the input number. - Parameters: - steps: Number of nearby values to measure in each direction (above and below the input number). - delta: Size of step in each direction between nearby values. - delta_type: "percent" if delta step between nearby values should be a calculated as a percent, or "absolute" if delta should be a constant step change. - """ - self.interpretation_steps = steps - self.interpretation_delta = delta - self.interpretation_delta_type = delta_type - return self - - def get_interpretation_neighbors(self, x: float | int) -> Tuple[List[float], Dict]: - x = self._round_to_precision(x, self.precision) - if self.interpretation_delta_type == "percent": - delta = 1.0 * self.interpretation_delta * x / 100 - elif self.interpretation_delta_type == "absolute": - delta = self.interpretation_delta - else: - delta = self.interpretation_delta - if self.precision == 0 and math.floor(delta) != delta: - raise ValueError( - f"Delta value {delta} is not an integer and precision=0. Cannot generate valid set of neighbors. " - "If delta_type='percent', pick a value of delta such that x * delta is an integer. " - "If delta_type='absolute', pick a value of delta that is an integer." 
- ) - # run_interpretation will preprocess the neighbors so no need to covert to int here - negatives = ( - np.array(x) + np.arange(-self.interpretation_steps, 0) * delta - ).tolist() - positives = ( - np.array(x) + np.arange(1, self.interpretation_steps + 1) * delta - ).tolist() - return negatives + positives, {} - - def get_interpretation_scores( - self, x: float, neighbors: List[float], scores: List[float | None], **kwargs - ) -> List[Tuple[float, float | None]]: - """ - Returns: - Each tuple set represents a numeric value near the input and its corresponding interpretation score. - """ - interpretation = list(zip(neighbors, scores)) - interpretation.insert(int(len(interpretation) / 2), (x, None)) - return interpretation - - def generate_sample(self) -> float: - return self._round_to_precision(1, self.precision) - - -@document("change", "style") -class Slider(FormComponent, Changeable, IOComponent, SimpleSerializable): - """ - Creates a slider that ranges from `minimum` to `maximum` with a step size of `step`. - Preprocessing: passes slider value as a {float} into the function. - Postprocessing: expects an {int} or {float} returned from function and sets slider value to it as long as it is within range. - Examples-format: A {float} or {int} representing the slider's value. - - Demos: sentence_builder, generate_tone, titanic_survival, interface_random_slider, blocks_random_slider - Guides: create_your_own_friends_with_a_gan - """ - - def __init__( - self, - minimum: float = 0, - maximum: float = 100, - value: float | Callable | None = None, - *, - step: float | None = None, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - randomize: bool = False, - **kwargs, - ): - """ - Parameters: - minimum: minimum value for slider. - maximum: maximum value for slider. - value: default value. If callable, the function will be called whenever the app loads to set the initial value of the component. Ignored if randomized=True. - step: increment between slider values. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, slider will be adjustable; if False, adjusting will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - randomize: If True, the value of the slider when the app loads is taken uniformly at random from the range given by the minimum and maximum. 
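For reference, a sketch of how the default `step` is derived when it is not given, mirroring the computation in Slider.__init__ just below; the function name and sample ranges are illustrative:

```python
import math

def default_step(minimum, maximum):
    """Roughly 1/100th of the range, snapped down to a power of ten."""
    difference = maximum - minimum
    power = math.floor(math.log10(difference) - 2)
    return 10 ** power

print(default_step(0, 100))    # 1
print(default_step(0.0, 1.0))  # 0.01
```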
- """ - self.minimum = minimum - self.maximum = maximum - if step is None: - difference = maximum - minimum - power = math.floor(math.log10(difference) - 2) - self.step = 10**power - else: - self.step = step - if randomize: - value = self.get_random_value - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - self.cleared_value = self.value - self.test_input = self.value - self.interpret_by_tokens = False - - def get_config(self): - return { - "minimum": self.minimum, - "maximum": self.maximum, - "step": self.step, - "value": self.value, - **IOComponent.get_config(self), - } - - def get_random_value(self): - n_steps = int((self.maximum - self.minimum) / self.step) - step = random.randint(0, n_steps) - value = self.minimum + step * self.step - # Round to number of decimals in step so that UI doesn't display long decimals - n_decimals = max(str(self.step)[::-1].find("."), 0) - if n_decimals: - value = round(value, n_decimals) - return value - - @staticmethod - def update( - value: float | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - minimum: float | None = None, - maximum: float | None = None, - step: float | None = None, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "minimum": minimum, - "maximum": maximum, - "step": step, - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def generate_sample(self) -> float: - return self.maximum - - def postprocess(self, y: float | None) -> float | None: - """ - Any postprocessing needed to be performed on function output. - Parameters: - y: numeric output - Returns: - numeric output or minimum number if None - """ - return self.minimum if y is None else y - - def set_interpret_parameters(self, steps: int = 8) -> "Slider": - """ - Calculates interpretation scores of numeric values ranging between the minimum and maximum values of the slider. - Parameters: - steps: Number of neighboring values to measure between the minimum and maximum values of the slider range. - """ - self.interpretation_steps = steps - return self - - def get_interpretation_neighbors(self, x) -> Tuple[object, dict]: - return ( - np.linspace(self.minimum, self.maximum, self.interpretation_steps).tolist(), - {}, - ) - - def get_interpretation_scores( - self, x, neighbors, scores: List[float], **kwargs - ) -> List[float]: - """ - Returns: - Each value represents the score corresponding to an evenly spaced range of inputs between the minimum and maximum slider values. - """ - return scores - - def style( - self, - *, - container: bool | None = None, - ): - """ - This method can be used to change the appearance of the slider. - Parameters: - container: If True, will place the component in a container - providing some extra padding around the border. - """ - return Component.style( - self, - container=container, - ) - - -@document("change", "style") -class Checkbox(FormComponent, Changeable, IOComponent, SimpleSerializable): - """ - Creates a checkbox that can be set to `True` or `False`. - - Preprocessing: passes the status of the checkbox as a {bool} into the function. - Postprocessing: expects a {bool} returned from the function and, if it is True, checks the checkbox. 
- Examples-format: a {bool} representing whether the box is checked. - Demos: sentence_builder, titanic_survival - """ - - def __init__( - self, - value: bool | Callable = False, - *, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: if True, checked by default. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, this checkbox can be checked; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.test_input = True - self.interpret_by_tokens = False - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: bool | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def generate_sample(self): - return True - - def set_interpret_parameters(self): - """ - Calculates interpretation score of the input by comparing the output against the output when the input is the inverse boolean value of x. - """ - return self - - def get_interpretation_neighbors(self, x): - return [not x], {} - - def get_interpretation_scores(self, x, neighbors, scores, **kwargs): - """ - Returns: - The first value represents the interpretation score if the input is False, and the second if the input is True. - """ - if x: - return scores[0], None - else: - return None, scores[0] - - -@document("change", "style") -class CheckboxGroup(FormComponent, Changeable, IOComponent, SimpleSerializable): - """ - Creates a set of checkboxes of which a subset can be checked. - Preprocessing: passes the list of checked checkboxes as a {List[str]} or their indices as a {List[int]} into the function, depending on `type`. - Postprocessing: expects a {List[str]}, each element of which becomes a checked checkbox. - Examples-format: a {List[str]} representing the values to be checked. 
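A small sketch of the `type` parameter described above; calling preprocess() directly is only for illustration (gradio normally calls it for you), and the choices and selections are made up:

```python
import gradio as gr

grp = gr.CheckboxGroup(choices=["red", "green", "blue"], type="index")
print(grp.preprocess(["red", "blue"]))   # [0, 2] -- indices into `choices`

grp_values = gr.CheckboxGroup(choices=["red", "green", "blue"], type="value")
print(grp_values.preprocess(["red", "blue"]))  # ['red', 'blue']
```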
- Demos: sentence_builder, titanic_survival - """ - - def __init__( - self, - choices: List[str] | None = None, - *, - value: List[str] | str | Callable | None = None, - type: str = "value", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - choices: list of options to select from. - value: default selected list of options. If callable, the function will be called whenever the app loads to set the initial value of the component. - type: Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indicies of the choices selected. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, choices in this checkbox group will be checkable; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.choices = choices or [] - self.cleared_value = [] - valid_types = ["value", "index"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - self.type = type - self.test_input = self.choices - self.interpret_by_tokens = False - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "choices": self.choices, - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: List[str] - | str - | Literal[_Keywords.NO_VALUE] - | None = _Keywords.NO_VALUE, - choices: List[str] | None = None, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "choices": choices, - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def generate_sample(self): - return self.choices - - def preprocess(self, x: List[str]) -> List[str] | List[int]: - """ - Parameters: - x: list of selected choices - Returns: - list of selected choices as strings or indices within choice list - """ - if self.type == "value": - return x - elif self.type == "index": - return [self.choices.index(choice) for choice in x] - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'value', 'index'." - ) - - def postprocess(self, y: List[str] | str | None) -> List[str]: - """ - Any postprocessing needed to be performed on function output. - Parameters: - y: List of selected choices. 
If a single choice is selected, it can be passed in as a string - Returns: - List of selected choices - """ - if y is None: - return [] - if not isinstance(y, list): - y = [y] - return y - - def set_interpret_parameters(self): - """ - Calculates interpretation score of each choice in the input by comparing the output against the outputs when each choice in the input is independently either removed or added. - """ - return self - - def get_interpretation_neighbors(self, x): - leave_one_out_sets = [] - for choice in self.choices: - leave_one_out_set = list(x) - if choice in leave_one_out_set: - leave_one_out_set.remove(choice) - else: - leave_one_out_set.append(choice) - leave_one_out_sets.append(leave_one_out_set) - return leave_one_out_sets, {} - - def get_interpretation_scores(self, x, neighbors, scores, **kwargs): - """ - Returns: - For each tuple in the list, the first value represents the interpretation score if the input is False, and the second if the input is True. - """ - final_scores = [] - for choice, score in zip(self.choices, scores): - if choice in x: - score_set = [score, None] - else: - score_set = [None, score] - final_scores.append(score_set) - return final_scores - - def style( - self, - *, - item_container: bool | None = None, - container: bool | None = None, - **kwargs, - ): - """ - This method can be used to change the appearance of the CheckboxGroup. - Parameters: - item_container: If True, will place the items in a container. - container: If True, will place the component in a container - providing some extra padding around the border. - """ - if item_container is not None: - self._style["item_container"] = item_container - - return Component.style(self, container=container, **kwargs) - - -@document("change", "style") -class Radio(FormComponent, Changeable, IOComponent, SimpleSerializable): - """ - Creates a set of radio buttons of which only one can be selected. - Preprocessing: passes the value of the selected radio button as a {str} or its index as an {int} into the function, depending on `type`. - Postprocessing: expects a {str} corresponding to the value of the radio button to be selected. - Examples-format: a {str} representing the radio option to select. - - Demos: sentence_builder, titanic_survival, blocks_essay - """ - - def __init__( - self, - choices: List[str] | None = None, - *, - value: str | Callable | None = None, - type: str = "value", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - choices: list of options to select from. - value: the button selected by default. If None, no button is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component. - type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, choices in this radio group will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. 
- visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.choices = choices or [] - valid_types = ["value", "index"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - self.type = type - self.test_input = self.choices[0] if len(self.choices) else None - self.interpret_by_tokens = False - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - self.cleared_value = self.value - - def get_config(self): - return { - "choices": self.choices, - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - choices: List[str] | None = None, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "choices": choices, - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def generate_sample(self): - return self.choices[0] - - def preprocess(self, x: str | None) -> str | int | None: - """ - Parameters: - x: selected choice - Returns: - selected choice as string or index within choice list - """ - if self.type == "value": - return x - elif self.type == "index": - if x is None: - return None - else: - return self.choices.index(x) - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'value', 'index'." - ) - - def set_interpret_parameters(self): - """ - Calculates interpretation score of each choice by comparing the output against each of the outputs when alternative choices are selected. - """ - return self - - def get_interpretation_neighbors(self, x): - choices = list(self.choices) - choices.remove(x) - return choices, {} - - def get_interpretation_scores( - self, x, neighbors, scores: List[float | None], **kwargs - ) -> List: - """ - Returns: - Each value represents the interpretation score corresponding to each choice. - """ - scores.insert(self.choices.index(x), None) - return scores - - def style( - self, - *, - item_container: bool | None = None, - container: bool | None = None, - **kwargs, - ): - """ - This method can be used to change the appearance of the radio component. - Parameters: - item_container: If True, will place items in a container. - container: If True, will place the component in a container - providing some extra padding around the border. - """ - if item_container is not None: - self._style["item_container"] = item_container - - return Component.style(self, container=container, **kwargs) - - -@document("change", "style") -class Dropdown(Radio): - """ - Creates a dropdown of which only one entry can be selected. - Preprocessing: passes the value of the selected dropdown entry as a {str} or its index as an {int} into the function, depending on `type`. - Postprocessing: expects a {str} corresponding to the value of the dropdown entry to be selected. - Examples-format: a {str} representing the drop down value to select. 
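Since Dropdown reuses Radio's behaviour, a minimal usage sketch looks the same as for Radio; the choices and default value here are made up, and preprocess() is called directly only to show what the wrapped function would receive:

```python
import gradio as gr

lang = gr.Dropdown(choices=["en", "hi", "ta"], value="en", label="Language")
print(lang.preprocess("ta"))  # 'ta' with the default type="value"

lang_idx = gr.Dropdown(choices=["en", "hi", "ta"], type="index")
print(lang_idx.preprocess("ta"))  # 2
```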
- Demos: sentence_builder, titanic_survival - """ - - def __init__( - self, - choices: List[str] | None = None, - *, - value: str | Callable | None = None, - type: str = "value", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - choices: list of options to select from. - value: default value selected in dropdown. If None, no value is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component. - type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, choices in this dropdown will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - Radio.__init__( - self, - value=value, - choices=choices, - type=type, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - **kwargs, - ) - - def style(self, *, container: bool | None = None, **kwargs): - """ - This method can be used to change the appearance of the Dropdown. - Parameters: - container: If True, will place the component in a container - providing some extra padding around the border. - """ - return Component.style(self, container=container, **kwargs) - - -@document("edit", "clear", "change", "stream", "change", "style") -class Image( - Editable, - Clearable, - Changeable, - Streamable, - Uploadable, - IOComponent, - ImgSerializable, -): - """ - Creates an image component that can be used to upload/draw images (as an input) or display images (as an output). - Preprocessing: passes the uploaded image as a {numpy.array}, {PIL.Image} or {str} filepath depending on `type` -- unless `tool` is `sketch` AND source is one of `upload` or `webcam`. In these cases, a {dict} with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`. - Postprocessing: expects a {numpy.array}, {PIL.Image} or {str} or {pathlib.Path} filepath to an image and displays the image. - Examples-format: a {str} filepath to a local file that contains the image. 
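To illustrate the sketch-tool behaviour noted above: with source="upload" and tool="sketch", the function receives a dict with "image" and "mask" keys. A minimal sketch assuming `import gradio as gr`; the function body is a placeholder:

```python
import gradio as gr

def keep_masked_region(inp):
    image, mask = inp["image"], inp["mask"]  # both PIL images with type="pil"
    return image  # placeholder: a real app would combine image and mask here

demo = gr.Interface(
    fn=keep_masked_region,
    inputs=gr.Image(source="upload", tool="sketch", type="pil"),
    outputs=gr.Image(type="pil"),
)
# demo.launch()
```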
- Demos: image_mod, image_mod_default_image - Guides: Gradio_and_ONNX_on_Hugging_Face, image_classification_in_pytorch, image_classification_in_tensorflow, image_classification_with_vision_transformers, building_a_pictionary_app, create_your_own_friends_with_a_gan - """ - - def __init__( - self, - value: str | _Image.Image | np.ndarray | None = None, - *, - shape: Tuple[int, int] | None = None, - image_mode: str = "RGB", - invert_colors: bool = False, - source: str = "upload", - tool: str | None = None, - type: str = "numpy", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - streaming: bool = False, - elem_id: str | None = None, - mirror_webcam: bool = True, - **kwargs, - ): - """ - Parameters: - value: A PIL Image, numpy array, path or URL for the default value that Image component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component. - shape: (width, height) shape to crop and resize image to; if None, matches input image size. Pass None for either width or height to only crop and resize the other. - image_mode: "RGB" if color, or "L" if black and white. - invert_colors: whether to invert the image as a preprocessing step. - source: Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools. - tool: Tools used for editing. "editor" allows a full screen editor (and is the default if source is "upload" or "webcam"), "select" provides a cropping and zoom tool, "sketch" allows you to create a binary sketch (and is the default if source="canvas"), and "color-sketch" allows you to created a sketch in different colors. "color-sketch" can be used with source="upload" or "webcam" to allow sketching on an image. "sketch" can also be used with "upload" or "webcam" to create a mask over an image and in that case both the image and mask are passed into the function as a dictionary with keys "image" and "mask" respectively. - type: The format the image is converted to before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (width, height, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, will allow users to upload and edit an image; if False, can only be used to display images. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - streaming: If True when used in a `live` interface, will automatically stream webcam feed. Only valid is source is 'webcam'. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - mirror_webcam: If True webcam will be mirrored. Default is True. 
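Two constructor constraints worth calling out, enforced just below: `type` must be one of "numpy", "pil", or "filepath", and streaming is only allowed from the webcam source. A small illustrative sketch:

```python
import gradio as gr

cam = gr.Image(source="webcam", streaming=True, type="numpy", mirror_webcam=False)

try:
    gr.Image(source="upload", streaming=True)
except ValueError as err:
    print(err)  # Image streaming only available if source is 'webcam'.
```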
- """ - self.mirror_webcam = mirror_webcam - valid_types = ["numpy", "pil", "filepath"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - self.type = type - self.shape = shape - self.image_mode = image_mode - valid_sources = ["upload", "webcam", "canvas"] - if source not in valid_sources: - raise ValueError( - f"Invalid value for parameter `source`: {source}. Please choose from one of: {valid_sources}" - ) - self.source = source - if tool is None: - self.tool = "sketch" if source == "canvas" else "editor" - else: - self.tool = tool - self.invert_colors = invert_colors - self.test_input = deepcopy(media_data.BASE64_IMAGE) - self.interpret_by_tokens = True - self.streaming = streaming - if streaming and source != "webcam": - raise ValueError("Image streaming only available if source is 'webcam'.") - - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "image_mode": self.image_mode, - "shape": self.shape, - "source": self.source, - "tool": self.tool, - "value": self.value, - "streaming": self.streaming, - "mirror_webcam": self.mirror_webcam, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def _format_image( - self, im: _Image.Image | None - ) -> np.ndarray | _Image.Image | str | None: - """Helper method to format an image based on self.type""" - if im is None: - return im - fmt = im.format - if self.type == "pil": - return im - elif self.type == "numpy": - return np.array(im) - elif self.type == "filepath": - file_obj = tempfile.NamedTemporaryFile( - delete=False, - suffix=("." + fmt.lower() if fmt is not None else ".png"), - ) - im.save(file_obj.name) - return file_obj.name - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'numpy', 'pil', 'filepath'." 
- ) - - def generate_sample(self): - return deepcopy(media_data.BASE64_IMAGE) - - def preprocess( - self, x: str | Dict[str, str] - ) -> np.ndarray | _Image.Image | str | Dict | None: - """ - Parameters: - x: base64 url data, or (if tool == "sketch") a dict of image and mask base64 url data - Returns: - image in requested format, or (if tool == "sketch") a dict of image and mask in requested format - """ - if x is None: - return x - - mask = "" - if self.tool == "sketch" and self.source in ["upload", "webcam"]: - assert isinstance(x, dict) - x, mask = x["image"], x["mask"] - - assert isinstance(x, str) - im = processing_utils.decode_base64_to_image(x) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - im = im.convert(self.image_mode) - if self.shape is not None: - im = processing_utils.resize_and_crop(im, self.shape) - if self.invert_colors: - im = PIL.ImageOps.invert(im) - if ( - self.source == "webcam" - and self.mirror_webcam is True - and self.tool != "color-sketch" - ): - im = PIL.ImageOps.mirror(im) - - if self.tool == "sketch" and self.source in ["upload", "webcam"]: - if mask is None: - return self._format_image(im) - mask_im = processing_utils.decode_base64_to_image(mask) - return { - "image": self._format_image(im), - "mask": self._format_image(mask_im), - } - - return self._format_image(im) - - def postprocess( - self, y: np.ndarray | _Image.Image | str | Path | None - ) -> str | None: - """ - Parameters: - y: image as a numpy array, PIL Image, string/Path filepath, or string URL - Returns: - base64 url data - """ - if y is None: - return None - if isinstance(y, np.ndarray): - return processing_utils.encode_array_to_base64(y) - elif isinstance(y, _Image.Image): - return processing_utils.encode_pil_to_base64(y) - elif isinstance(y, (str, Path)): - return processing_utils.encode_url_or_file_to_base64(y) - else: - raise ValueError("Cannot process this value as an Image") - - def set_interpret_parameters(self, segments: int = 16): - """ - Calculates interpretation score of image subsections by splitting the image into subsections, then using a "leave one out" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value. - Parameters: - segments: Number of interpretation segments to split image into. - """ - self.interpretation_segments = segments - return self - - def _segment_by_slic(self, x): - """ - Helper method that segments an image into superpixels using slic. - Parameters: - x: base64 representation of an image - """ - x = processing_utils.decode_base64_to_image(x) - if self.shape is not None: - x = processing_utils.resize_and_crop(x, self.shape) - resized_and_cropped_image = np.array(x) - try: - from skimage.segmentation import slic - except (ImportError, ModuleNotFoundError): - raise ValueError( - "Error: running this interpretation for images requires scikit-image, please install it first." 
- ) - try: - segments_slic = slic( - resized_and_cropped_image, - self.interpretation_segments, - compactness=10, - sigma=1, - start_label=1, - ) - except TypeError: # For skimage 0.16 and older - segments_slic = slic( - resized_and_cropped_image, - self.interpretation_segments, - compactness=10, - sigma=1, - ) - return segments_slic, resized_and_cropped_image - - def tokenize(self, x): - """ - Segments image into tokens, masks, and leave-one-out-tokens - Parameters: - x: base64 representation of an image - Returns: - tokens: list of tokens, used by the get_masked_input() method - leave_one_out_tokens: list of left-out tokens, used by the get_interpretation_neighbors() method - masks: list of masks, used by the get_interpretation_neighbors() method - """ - segments_slic, resized_and_cropped_image = self._segment_by_slic(x) - tokens, masks, leave_one_out_tokens = [], [], [] - replace_color = np.mean(resized_and_cropped_image, axis=(0, 1)) - for (i, segment_value) in enumerate(np.unique(segments_slic)): - mask = segments_slic == segment_value - image_screen = np.copy(resized_and_cropped_image) - image_screen[segments_slic == segment_value] = replace_color - leave_one_out_tokens.append( - processing_utils.encode_array_to_base64(image_screen) - ) - token = np.copy(resized_and_cropped_image) - token[segments_slic != segment_value] = 0 - tokens.append(token) - masks.append(mask) - return tokens, leave_one_out_tokens, masks - - def get_masked_inputs(self, tokens, binary_mask_matrix): - masked_inputs = [] - for binary_mask_vector in binary_mask_matrix: - masked_input = np.zeros_like(tokens[0], dtype=int) - for token, b in zip(tokens, binary_mask_vector): - masked_input = masked_input + token * int(b) - masked_inputs.append(processing_utils.encode_array_to_base64(masked_input)) - return masked_inputs - - def get_interpretation_scores( - self, x, neighbors, scores, masks, tokens=None, **kwargs - ) -> List[List[float]]: - """ - Returns: - A 2D array representing the interpretation score of each pixel of the image. - """ - x = processing_utils.decode_base64_to_image(x) - if self.shape is not None: - x = processing_utils.resize_and_crop(x, self.shape) - x = np.array(x) - output_scores = np.zeros((x.shape[0], x.shape[1])) - - for score, mask in zip(scores, masks): - output_scores += score * mask - - max_val, min_val = np.max(output_scores), np.min(output_scores) - if max_val > 0: - output_scores = (output_scores - min_val) / (max_val - min_val) - return output_scores.tolist() - - def style(self, *, height: int | None = None, width: int | None = None, **kwargs): - """ - This method can be used to change the appearance of the Image component. - Parameters: - height: Height of the image. - width: Width of the image. - """ - self._style["height"] = height - self._style["width"] = width - return Component.style( - self, - **kwargs, - ) - - def stream( - self, - fn: Callable, - inputs: List[Component], - outputs: List[Component], - _js: str | None = None, - api_name: str | None = None, - preprocess: bool = True, - postprocess: bool = True, - ): - """ - This event is triggered when the user streams the component (e.g. a live webcam - component) - Parameters: - fn: Callable function - inputs: List of inputs - outputs: List of outputs - """ - # js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. 
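For context on the stream event wired up here: with streaming=True and a live Interface, the function is re-run as webcam frames arrive. A minimal sketch assuming `import gradio as gr`; the flip function is a placeholder:

```python
import gradio as gr

def flip(frame):
    return frame[:, ::-1, :]  # mirror the incoming numpy frame horizontally

demo = gr.Interface(
    fn=flip,
    inputs=gr.Image(source="webcam", streaming=True),
    outputs=gr.Image(),
    live=True,
)
# demo.launch()
```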
- if self.source != "webcam": - raise ValueError("Image streaming only available if source is 'webcam'.") - Streamable.stream( - self, - fn, - inputs, - outputs, - _js=_js, - api_name=api_name, - preprocess=preprocess, - postprocess=postprocess, - ) - - def as_example(self, input_data: str | None) -> str: - return "" if input_data is None else str(Path(input_data).resolve()) - - -@document("change", "clear", "play", "pause", "stop", "style") -class Video( - Changeable, - Clearable, - Playable, - Uploadable, - IOComponent, - FileSerializable, - TempFileManager, -): - """ - Creates a video component that can be used to upload/record videos (as an input) or display videos (as an output). - For the video to be playable in the browser it must have a compatible container and codec combination. Allowed - combinations are .mp4 with h264 codec, .ogg with theora codec, and .webm with vp9 codec. If the component detects - that the output video would not be playable in the browser it will attempt to convert it to a playable mp4 video. - If the conversion fails, the original video is returned. - Preprocessing: passes the uploaded video as a {str} filepath or URL whose extension can be modified by `format`. - Postprocessing: expects a {str} filepath to a video which is displayed. - Examples-format: a {str} filepath to a local file that contains the video. - Demos: video_identity - """ - - def __init__( - self, - value: str | Callable | None = None, - *, - format: str | None = None, - source: str = "upload", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - mirror_webcam: bool = True, - include_audio: bool | None = None, - **kwargs, - ): - """ - Parameters: - value: A path or URL for the default value that Video component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component. - format: Format of video format to be returned by component, such as 'avi' or 'mp4'. Use 'mp4' to ensure browser playability. If set to None, video will keep uploaded format. - source: Source of video. "upload" creates a box where user can drop an video file, "webcam" allows user to record a video from their webcam. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - mirror_webcam: If True webcam will be mirrored. Default is True. - include_audio: Whether the component should record/retain the audio track for a video. By default, audio is excluded for webcam videos and included for uploaded videos. - """ - self.format = format - valid_sources = ["upload", "webcam"] - if source not in valid_sources: - raise ValueError( - f"Invalid value for parameter `source`: {source}. 
Please choose from one of: {valid_sources}" - ) - self.source = source - self.mirror_webcam = mirror_webcam - self.include_audio = ( - include_audio if include_audio is not None else source == "upload" - ) - TempFileManager.__init__(self) - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "source": self.source, - "value": self.value, - "mirror_webcam": self.mirror_webcam, - "include_audio": self.include_audio, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - source: str | None = None, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "source": source, - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def preprocess(self, x: Dict[str, str] | None) -> str | None: - """ - Parameters: - x: a dictionary with the following keys: 'name' (containing the file path to a video), 'data' (with either the file URL or base64 representation of the video), and 'is_file` (True if `data` contains the file URL). - Returns: - a string file path to the preprocessed video - """ - if x is None: - return x - - file_name, file_data, is_file = ( - x["name"], - x["data"], - x.get("is_file", False), - ) - if is_file: - file = self.make_temp_copy_if_needed(file_name) - file_name = Path(file) - else: - file = processing_utils.decode_base64_to_file( - file_data, file_path=file_name - ) - file_name = Path(file.name) - - uploaded_format = file_name.suffix.replace(".", "") - modify_format = self.format is not None and uploaded_format != self.format - flip = self.source == "webcam" and self.mirror_webcam - if modify_format or flip: - format = f".{self.format if modify_format else uploaded_format}" - output_options = ["-vf", "hflip", "-c:a", "copy"] if flip else [] - output_options += ["-an"] if not self.include_audio else [] - flip_suffix = "_flip" if flip else "" - output_file_name = str( - file_name.with_name(f"{file_name.stem}{flip_suffix}{format}") - ) - if Path(output_file_name).exists(): - return output_file_name - ff = FFmpeg( - inputs={str(file_name): None}, - outputs={output_file_name: output_options}, - ) - ff.run() - return output_file_name - elif not self.include_audio: - output_file_name = str(file_name.with_name(f"muted_{file_name.name}")) - ff = FFmpeg( - inputs={str(file_name): None}, - outputs={output_file_name: ["-an"]}, - ) - ff.run() - return output_file_name - else: - return str(file_name) - - def generate_sample(self): - """Generates a random video for testing the API.""" - return deepcopy(media_data.BASE64_VIDEO) - - def postprocess(self, y: str | None) -> Dict[str, Any] | None: - """ - Processes a video to ensure that it is in the correct format before - returning it to the front end. - Parameters: - y: a path or URL to the video file - Returns: - a dictionary with the following keys: 'name' (containing the file path - to a temporary copy of the video), 'data' (None), and 'is_file` (True). 
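# Editorial sketch, not from the original file: the Video component described above, forcing
# mp4 output so the result stays browser-playable; `identity` is a placeholder function.
import gradio as gr

def identity(video_path):
    # The component passes a filepath in and expects a filepath back.
    return video_path

demo = gr.Interface(identity, gr.Video(source="upload"), gr.Video(format="mp4"))
# demo.launch()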
- """ - if y is None: - return None - - returned_format = y.split(".")[-1].lower() - - if self.format is None or returned_format == self.format: - conversion_needed = False - else: - conversion_needed = True - - # For cases where the video is a URL and does not need to be converted to another format, we can just return the URL - if utils.validate_url(y) and not (conversion_needed): - return {"name": y, "data": None, "is_file": True} - - # For cases where the video needs to be converted to another format - if utils.validate_url(y): - y = self.download_temp_copy_if_needed(y) - if ( - processing_utils.ffmpeg_installed() - and not processing_utils.video_is_playable(y) - ): - warnings.warn( - "Video does not have browser-compatible container or codec. Converting to mp4" - ) - y = processing_utils.convert_video_to_playable_mp4(y) - if self.format is not None and returned_format != self.format: - output_file_name = y[0 : y.rindex(".") + 1] + self.format - ff = FFmpeg(inputs={y: None}, outputs={output_file_name: None}) - ff.run() - y = output_file_name - - y = self.make_temp_copy_if_needed(y) - return {"name": y, "data": None, "is_file": True} - - def style(self, *, height: int | None = None, width: int | None = None, **kwargs): - """ - This method can be used to change the appearance of the video component. - Parameters: - height: Height of the video. - width: Width of the video. - """ - self._style["height"] = height - self._style["width"] = width - return Component.style( - self, - **kwargs, - ) - - -@document("change", "clear", "play", "pause", "stop", "stream", "style") -class Audio( - Changeable, - Clearable, - Playable, - Streamable, - Uploadable, - IOComponent, - FileSerializable, - TempFileManager, -): - """ - Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output). - Preprocessing: passes the uploaded audio as a {Tuple(int, numpy.array)} corresponding to (sample rate, data) or as a {str} filepath, depending on `type` - Postprocessing: expects a {Tuple(int, numpy.array)} corresponding to (sample rate, data) or as a {str} filepath or URL to an audio file, which gets displayed - Examples-format: a {str} filepath to a local file that contains audio. - Demos: main_note, generate_tone, reverse_audio - Guides: real_time_speech_recognition - """ - - def __init__( - self, - value: str | Tuple[int, np.ndarray] | Callable | None = None, - *, - source: str = "upload", - type: str = "numpy", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - streaming: bool = False, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: A path, URL, or [sample_rate, numpy array] tuple for the default value that Audio component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component. - source: Source of audio. "upload" creates a box where user can drop an audio file, "microphone" creates a microphone input. - type: The format the audio file is converted to before being passed into the prediction function. "numpy" converts the audio to a tuple consisting of: (int sample rate, numpy.array for the data), "filepath" passes a str path to a temporary file containing the audio. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. 
The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, will allow users to upload and edit an audio file; if False, can only be used to play audio. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - streaming: If set to True when used in a `live` interface, will automatically stream audio from the microphone. Only valid if source is 'microphone'. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - valid_sources = ["upload", "microphone"] - if source not in valid_sources: - raise ValueError( - f"Invalid value for parameter `source`: {source}. Please choose from one of: {valid_sources}" - ) - self.source = source - valid_types = ["numpy", "filepath"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - self.type = type - self.test_input = deepcopy(media_data.BASE64_AUDIO) - self.interpret_by_tokens = True - self.streaming = streaming - if streaming and source != "microphone": - raise ValueError( - "Audio streaming only available if source is 'microphone'." - ) - TempFileManager.__init__(self) - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "source": self.source, - "value": self.value, - "streaming": self.streaming, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - source: str | None = None, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "source": source, - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def preprocess( - self, x: Dict[str, Any] | None - ) -> Tuple[int, np.ndarray] | str | None: - """ - Parameters: - x: dictionary with keys "name", "data", "is_file", "crop_min", "crop_max". - Returns: - audio in requested format - """ - if x is None: - return x - file_name, file_data, is_file = ( - x["name"], - x["data"], - x.get("is_file", False), - ) - crop_min, crop_max = x.get("crop_min", 0), x.get("crop_max", 100) - if is_file: - if utils.validate_url(file_name): - temp_file_path = self.download_temp_copy_if_needed(file_name) - else: - temp_file_path = self.make_temp_copy_if_needed(file_name) - else: - temp_file_obj = processing_utils.decode_base64_to_file( - file_data, file_path=file_name - ) - temp_file_path = temp_file_obj.name - - sample_rate, data = processing_utils.audio_from_file( - temp_file_path, crop_min=crop_min, crop_max=crop_max - ) - - if self.type == "numpy": - return sample_rate, data - elif self.type == "filepath": - processing_utils.audio_to_file(sample_rate, data, temp_file_path) - return temp_file_path - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'numpy', 'filepath'." 
- ) - - def set_interpret_parameters(self, segments: int = 8): - """ - Calculates interpretation score of audio subsections by splitting the audio into subsections, then using a "leave one out" method to calculate the score of each subsection by removing the subsection and measuring the delta of the output value. - Parameters: - segments: Number of interpretation segments to split audio into. - """ - self.interpretation_segments = segments - return self - - def tokenize(self, x): - if x.get("is_file"): - sample_rate, data = processing_utils.audio_from_file(x["name"]) - else: - file_obj = processing_utils.decode_base64_to_file(x["data"]) - sample_rate, data = processing_utils.audio_from_file(file_obj.name) - leave_one_out_sets = [] - tokens = [] - masks = [] - duration = data.shape[0] - boundaries = np.linspace(0, duration, self.interpretation_segments + 1).tolist() - boundaries = [round(boundary) for boundary in boundaries] - for index in range(len(boundaries) - 1): - start, stop = boundaries[index], boundaries[index + 1] - masks.append((start, stop)) - - # Handle the leave one outs - leave_one_out_data = np.copy(data) - leave_one_out_data[start:stop] = 0 - file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav") - processing_utils.audio_to_file(sample_rate, leave_one_out_data, file.name) - out_data = processing_utils.encode_file_to_base64(file.name) - leave_one_out_sets.append(out_data) - file.close() - Path(file.name).unlink() - - # Handle the tokens - token = np.copy(data) - token[0:start] = 0 - token[stop:] = 0 - file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav") - processing_utils.audio_to_file(sample_rate, token, file.name) - token_data = processing_utils.encode_file_to_base64(file.name) - file.close() - Path(file.name).unlink() - - tokens.append(token_data) - tokens = [{"name": "token.wav", "data": token} for token in tokens] - leave_one_out_sets = [ - {"name": "loo.wav", "data": loo_set} for loo_set in leave_one_out_sets - ] - return tokens, leave_one_out_sets, masks - - def get_masked_inputs(self, tokens, binary_mask_matrix): - # create a "zero input" vector and get sample rate - x = tokens[0]["data"] - file_obj = processing_utils.decode_base64_to_file(x) - sample_rate, data = processing_utils.audio_from_file(file_obj.name) - zero_input = np.zeros_like(data, dtype="int16") - # decode all of the tokens - token_data = [] - for token in tokens: - file_obj = processing_utils.decode_base64_to_file(token["data"]) - _, data = processing_utils.audio_from_file(file_obj.name) - token_data.append(data) - # construct the masked version - masked_inputs = [] - for binary_mask_vector in binary_mask_matrix: - masked_input = np.copy(zero_input) - for t, b in zip(token_data, binary_mask_vector): - masked_input = masked_input + t * int(b) - file = tempfile.NamedTemporaryFile(delete=False) - processing_utils.audio_to_file(sample_rate, masked_input, file.name) - masked_data = processing_utils.encode_file_to_base64(file.name) - file.close() - Path(file.name).unlink() - masked_inputs.append(masked_data) - return masked_inputs - - def get_interpretation_scores( - self, x, neighbors, scores, masks=None, tokens=None - ) -> List[float]: - """ - Returns: - Each value represents the interpretation score corresponding to an evenly spaced subsection of audio. 
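# Editorial sketch, not from the original file: the Audio component described above with
# type="numpy", so the user function receives and returns a (sample_rate, data) tuple.
import gradio as gr
import numpy as np

def reverse(audio):
    # Reverse the samples along the time axis.
    sample_rate, data = audio
    return sample_rate, np.flipud(data)

demo = gr.Interface(reverse, gr.Audio(source="upload", type="numpy"), gr.Audio())
# demo.launch()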
- """ - return list(scores) - - def generate_sample(self): - return deepcopy(media_data.BASE64_AUDIO) - - def postprocess(self, y: Tuple[int, np.ndarray] | str | None) -> str | Dict | None: - """ - Parameters: - y: audio data in either of the following formats: a tuple of (sample_rate, data), or a string filepath or URL to an audio file, or None. - Returns: - base64 url data - """ - if y is None: - return None - if isinstance(y, str) and utils.validate_url(y): - return {"name": y, "data": None, "is_file": True} - if isinstance(y, tuple): - sample_rate, data = y - file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False) - processing_utils.audio_to_file(sample_rate, data, file.name) - file_path = file.name - self.temp_files.add(file_path) - else: - file_path = self.make_temp_copy_if_needed(y) - return {"name": file_path, "data": None, "is_file": True} - - def stream( - self, - fn: Callable, - inputs: List[Component], - outputs: List[Component], - _js: str | None = None, - api_name: str | None = None, - preprocess: bool = True, - postprocess: bool = True, - ): - """ - This event is triggered when the user streams the component (e.g. a live webcam - component) - Parameters: - fn: Callable function - inputs: List of inputs - outputs: List of outputs - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. - if self.source != "microphone": - raise ValueError( - "Audio streaming only available if source is 'microphone'." - ) - Streamable.stream( - self, - fn, - inputs, - outputs, - _js=_js, - api_name=api_name, - preprocess=preprocess, - postprocess=postprocess, - ) - - def style( - self, - **kwargs, - ): - """ - This method can be used to change the appearance of the audio component. - """ - return Component.style( - self, - **kwargs, - ) - - def as_example(self, input_data: str | None) -> str: - return Path(input_data).name if input_data else "" - - -@document("change", "clear", "style") -class File( - Changeable, Clearable, Uploadable, IOComponent, FileSerializable, TempFileManager -): - """ - Creates a file component that allows uploading generic file (when used as an input) and or displaying generic files (output). - Preprocessing: passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`) - Postprocessing: expects function to return a {str} path to a file, or {List[str]} consisting of paths to files. - Examples-format: a {str} path to a local file that populates the component. - Demos: zip_to_json, zip_files - """ - - def __init__( - self, - value: str | List[str] | Callable | None = None, - *, - file_count: str = "single", - file_types: List[str] | None = None, - type: str = "file", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Default file to display, given as str file path. If callable, the function will be called whenever the app loads to set the initial value of the component. - file_count: if single, allows user to upload one file. If "multiple", user uploads multiple files. If "directory", user uploads all files in selected directory. Return type will be list for each file in case of "multiple" or "directory". - file_types: List of type of files to be uploaded. 
"file" allows any file to be uploaded, "image" allows only image files to be uploaded, "audio" allows only audio files to be uploaded, "video" allows only video files to be uploaded, "text" allows only text files to be uploaded. - type: Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name and original filename can be retrieved with file_obj.orig_name, "binary" returns an bytes object. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, will allow users to upload a file; if False, can only be used to display files. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.file_count = file_count - self.file_types = file_types - valid_types = [ - "file", - "binary", - "bytes", - ] # "bytes" is included for backwards compatibility - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}" - ) - if type == "bytes": - warnings.warn( - "The `bytes` type is deprecated and may not work as expected. Please use `binary` instead." - ) - self.type = type - self.test_input = None - TempFileManager.__init__(self) - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "file_count": self.file_count, - "file_types": self.file_types, - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def preprocess( - self, x: List[Dict[str, Any]] | None - ) -> bytes | tempfile._TemporaryFileWrapper | List[ - bytes | tempfile._TemporaryFileWrapper - ] | None: - """ - Parameters: - x: List of JSON objects with filename as 'name' property and base64 data as 'data' property - Returns: - File objects in requested format - """ - if x is None: - return None - - def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper: - file_name, data, is_file = ( - f["name"], - f["data"], - f.get("is_file", False), - ) - if self.type == "file": - if is_file: - temp_file_path = self.make_temp_copy_if_needed(file_name) - file = tempfile.NamedTemporaryFile(delete=False) - file.name = temp_file_path - file.orig_name = file_name # type: ignore - else: - file = processing_utils.decode_base64_to_file( - data, file_path=file_name - ) - file.orig_name = file_name # type: ignore - return file - elif ( - self.type == "binary" or self.type == "bytes" - ): # "bytes" is included for backwards compatibility - if is_file: - 
with open(file_name, "rb") as file_data: - return file_data.read() - return processing_utils.decode_base64_to_binary(data)[0] - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'file', 'bytes'." - ) - - if self.file_count == "single": - if isinstance(x, list): - return process_single_file(x[0]) - else: - return process_single_file(x) - else: - if isinstance(x, list): - return [process_single_file(f) for f in x] - else: - return process_single_file(x) - - def generate_sample(self): - return deepcopy(media_data.BASE64_FILE) - - def postprocess( - self, y: str | List[str] | None - ) -> Dict[str, Any] | List[Dict[str, Any]] | None: - """ - Parameters: - y: file path - Returns: - JSON object with key 'name' for filename, 'data' for base64 url, and 'size' for filesize in bytes - """ - if y is None: - return None - if isinstance(y, list): - return [ - { - "orig_name": Path(file).name, - "name": self.make_temp_copy_if_needed(file), - "size": Path(file).stat().st_size, - "data": None, - "is_file": True, - } - for file in y - ] - else: - return { - "orig_name": Path(y).name, - "name": self.make_temp_copy_if_needed(y), - "size": Path(y).stat().st_size, - "data": None, - "is_file": True, - } - - def serialize( - self, x: str | None, load_dir: str = "", encryption_key: bytes | None = None - ) -> Dict | None: - serialized = FileSerializable.serialize(self, x, load_dir, encryption_key) - if serialized is None: - return None - serialized["size"] = Path(serialized["name"]).stat().st_size - return serialized - - def style( - self, - **kwargs, - ): - """ - This method can be used to change the appearance of the file component. - """ - return Component.style( - self, - **kwargs, - ) - - def as_example(self, input_data: str | List | None) -> str: - if input_data is None: - return "" - elif isinstance(input_data, list): - return ", ".join([Path(file).name for file in input_data]) - else: - return Path(input_data).name - - -@document("change", "style") -class Dataframe(Changeable, IOComponent, JSONSerializable): - """ - Accepts or displays 2D input through a spreadsheet-like component for dataframes. - Preprocessing: passes the uploaded spreadsheet data as a {pandas.DataFrame}, {numpy.array}, {List[List]}, or {List} depending on `type` - Postprocessing: expects a {pandas.DataFrame}, {numpy.array}, {List[List]}, {List}, a {Dict} with keys `data` (and optionally `headers`), or {str} path to a csv, which is rendered in the spreadsheet. - Examples-format: a {str} filepath to a csv with data, a pandas dataframe, or a list of lists (excluding headers) where each sublist is a row of data. - Demos: filter_records, matrix_transpose, tax_calculator - """ - - markdown_parser = None - - def __init__( - self, - value: List[List[Any]] | Callable | None = None, - *, - headers: List[str] | None = None, - row_count: int | Tuple[int, str] = (1, "dynamic"), - col_count: int | Tuple[int, str] | None = None, - datatype: str | List[str] = "str", - type: str = "pandas", - max_rows: int | None = 20, - max_cols: int | None = None, - overflow_row_behaviour: str = "paginate", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - wrap: bool = False, - **kwargs, - ): - """ - Parameters: - value: Default value as a 2-dimensional list of values. If callable, the function will be called whenever the app loads to set the initial value of the component. 
- headers: List of str header names. If None, no headers are shown. - row_count: Limit number of rows for input and decide whether user can create new rows. The first element of the tuple is an `int`, the row count; the second should be 'fixed' or 'dynamic', the new row behaviour. If an `int` is passed the rows default to 'dynamic' - col_count: Limit number of columns for input and decide whether user can create new columns. The first element of the tuple is an `int`, the number of columns; the second should be 'fixed' or 'dynamic', the new column behaviour. If an `int` is passed the columns default to 'dynamic' - datatype: Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are "str", "number", "bool", "date", and "markdown". - type: Type of value to be returned by component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for a Python array. - max_rows: Maximum number of rows to display at once. Set to None for infinite. - max_cols: Maximum number of columns to display at once. Set to None for infinite. - overflow_row_behaviour: If set to "paginate", will create pages for overflow rows. If set to "show_ends", will show initial and final rows and truncate middle rows. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, will allow users to edit the dataframe; if False, can only be used to display data. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - wrap: if True, text in table cells will wrap when appropriate; if False, the table will scroll horizontally. Defaults to False. - """ - - self.wrap = wrap - self.row_count = self.__process_counts(row_count) - self.col_count = self.__process_counts( - col_count, len(headers) if headers else 3 - ) - - self.__validate_headers(headers, self.col_count[0]) - - self.headers = ( - headers if headers is not None else list(range(1, self.col_count[0] + 1)) - ) - self.datatype = ( - datatype if isinstance(datatype, list) else [datatype] * self.col_count[0] - ) - valid_types = ["pandas", "numpy", "array"] - if type not in valid_types: - raise ValueError( - f"Invalid value for parameter `type`: {type}. 
Please choose from one of: {valid_types}" - ) - self.type = type - values = { - "str": "", - "number": 0, - "bool": False, - "date": "01/01/1970", - "markdown": "", - "html": "", - } - column_dtypes = ( - [datatype] * self.col_count[0] if isinstance(datatype, str) else datatype - ) - self.test_input = [ - [values[c] for c in column_dtypes] for _ in range(self.row_count[0]) - ] - - self.max_rows = max_rows - self.max_cols = max_cols - self.overflow_row_behaviour = overflow_row_behaviour - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "headers": self.headers, - "datatype": self.datatype, - "row_count": self.row_count, - "col_count": self.col_count, - "value": self.value, - "max_rows": self.max_rows, - "max_cols": self.max_cols, - "overflow_row_behaviour": self.overflow_row_behaviour, - "wrap": self.wrap, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - max_rows: int | None = None, - max_cols: str | None = None, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "max_rows": max_rows, - "max_cols": max_cols, - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def preprocess(self, x: DataframeData): - """ - Parameters: - x: 2D array of str, numeric, or bool data - Returns: - Dataframe in requested format - """ - if self.type == "pandas": - if x.get("headers") is not None: - return pd.DataFrame(x["data"], columns=x.get("headers")) - else: - return pd.DataFrame(x["data"]) - if self.type == "numpy": - return np.array(x["data"]) - elif self.type == "array": - return x["data"] - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'pandas', 'numpy', 'array'." 
- ) - - def generate_sample(self): - return [[1, 2, 3], [4, 5, 6]] - - def postprocess( - self, y: str | pd.DataFrame | np.ndarray | List[List[str | float]] | Dict - ) -> Dict: - """ - Parameters: - y: dataframe in given format - Returns: - JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data - """ - if y is None: - return self.postprocess(self.test_input) - if isinstance(y, dict): - return y - if isinstance(y, str): - dataframe = pd.read_csv(y) - return { - "headers": list(dataframe.columns), - "data": Dataframe.__process_markdown( - dataframe.to_dict(orient="split")["data"], self.datatype - ), - } - if isinstance(y, pd.DataFrame): - return { - "headers": list(y.columns), # type: ignore - "data": Dataframe.__process_markdown( - y.to_dict(orient="split")["data"], self.datatype # type: ignore - ), - } - if isinstance(y, (np.ndarray, list)): - if isinstance(y, np.ndarray): - y = y.tolist() - assert isinstance(y, list), "output cannot be converted to list" - - _headers = self.headers - - if len(self.headers) < len(y[0]): - _headers = [ - *self.headers, - *list(range(len(self.headers) + 1, len(y[0]) + 1)), - ] - elif len(self.headers) > len(y[0]): - _headers = self.headers[: len(y[0])] - - return { - "headers": _headers, - "data": Dataframe.__process_markdown(y, self.datatype), - } - raise ValueError("Cannot process value as a Dataframe") - - @staticmethod - def __process_counts(count, default=3) -> Tuple[int, str]: - if count is None: - return (default, "dynamic") - if type(count) == int or type(count) == float: - return (int(count), "dynamic") - else: - return count - - @staticmethod - def __validate_headers(headers: List[str] | None, col_count: int): - if headers is not None and len(headers) != col_count: - raise ValueError( - "The length of the headers list must be equal to the col_count int.\nThe column count is set to {cols} but `headers` has {headers} items. Check the values passed to `col_count` and `headers`.".format( - cols=col_count, headers=len(headers) - ) - ) - - @classmethod - def __process_markdown(cls, data: List[List[Any]], datatype: List[str]): - if "markdown" not in datatype: - return data - - if cls.markdown_parser is None: - cls.markdown_parser = ( - MarkdownIt() - .use(dollarmath_plugin, renderer=utils.tex2svg, allow_digits=False) - .enable("table") - ) - - for i in range(len(data)): - for j in range(len(data[i])): - if datatype[j] == "markdown": - data[i][j] = cls.markdown_parser.render(data[i][j]) - - return data - - def style( - self, - **kwargs, - ): - """ - This method can be used to change the appearance of the DataFrame component. - """ - return Component.style( - self, - **kwargs, - ) - - def as_example(self, input_data: pd.DataFrame | np.ndarray | str | None): - if input_data is None: - return "" - elif isinstance(input_data, pd.DataFrame): - return input_data.head(n=5).to_dict(orient="split")["data"] # type: ignore - elif isinstance(input_data, np.ndarray): - return input_data.tolist() - return input_data - - -@document("change", "style") -class Timeseries(Changeable, IOComponent, JSONSerializable): - """ - Creates a component that can be used to upload/preview timeseries csv files or display a dataframe consisting of a time series graphically. 
- Preprocessing: passes the uploaded timeseries data as a {pandas.DataFrame} into the function - Postprocessing: expects a {pandas.DataFrame} or {str} path to a csv to be returned, which is then displayed as a timeseries graph - Examples-format: a {str} filepath of csv data with time series data. - Demos: fraud_detector - """ - - def __init__( - self, - value: str | Callable | None = None, - *, - x: str | None = None, - y: str | List[str] | None = None, - colors: List[str] | None = None, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: File path for the timeseries csv file. If callable, the function will be called whenever the app loads to set the initial value of the component. - x: Column name of x (time) series. None if csv has no headers, in which case first column is x series. - y: Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - colors: an ordered list of colors to use for each line plot - show_label: if True, will display label. - interactive: if True, will allow users to upload a timeseries csv; if False, can only be used to display timeseries data. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.x = x - if isinstance(y, str): - y = [y] - self.y = y - self.colors = colors - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "x": self.x, - "y": self.y, - "value": self.value, - "colors": self.colors, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - colors: List[str] | None = None, - label: str | None = None, - show_label: bool | None = None, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "colors": colors, - "label": label, - "show_label": show_label, - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def preprocess(self, x: Dict | None) -> pd.DataFrame | None: - """ - Parameters: - x: Dict with keys 'data': 2D array of str, numeric, or bool data, 'headers': list of strings for header names, 'range': optional two element list designating start of end of subrange. 
- Returns: - Dataframe of timeseries data - """ - if x is None: - return x - elif x.get("is_file"): - dataframe = pd.read_csv(x["name"]) - else: - dataframe = pd.DataFrame(data=x["data"], columns=x["headers"]) - if x.get("range") is not None: - dataframe = dataframe.loc[dataframe[self.x or 0] >= x["range"][0]] - dataframe = dataframe.loc[dataframe[self.x or 0] <= x["range"][1]] - return dataframe - - def generate_sample(self): - return { - "data": [[1] + [2] * len(self.y or [])] * 4, - "headers": [self.x] + (self.y or []), - } - - def postprocess(self, y: str | pd.DataFrame | None) -> Dict | None: - """ - Parameters: - y: csv or dataframe with timeseries data - Returns: - JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data - """ - if y is None: - return None - if isinstance(y, str): - dataframe = pd.read_csv(y) - return { - "headers": dataframe.columns.values.tolist(), - "data": dataframe.values.tolist(), - } - if isinstance(y, pd.DataFrame): - return {"headers": y.columns.values.tolist(), "data": y.values.tolist()} - raise ValueError("Cannot process value as Timeseries data") - - def style( - self, - **kwargs, - ): - """ - This method can be used to change the appearance of the TimeSeries component. - """ - return Component.style( - self, - **kwargs, - ) - - -@document() -class State(IOComponent, SimpleSerializable): - """ - Special hidden component that stores session state across runs of the demo by the - same user. The value of the State variable is cleared when the user refreshes the page. - - Preprocessing: No preprocessing is performed - Postprocessing: No postprocessing is performed - Demos: chatbot_demo, blocks_simple_squares - Guides: creating_a_chatbot, real_time_speech_recognition - """ - - allow_string_shortcut = False - - def __init__( - self, - value: Any = None, - **kwargs, - ): - """ - Parameters: - value: the initial value of the state. If callable, the function will be called whenever the app loads to set the initial value of the component. - """ - self.stateful = True - IOComponent.__init__(self, value=deepcopy(value), **kwargs) - - def style(self): - return self - - -class Variable(State): - """Variable was renamed to State. This class is kept for backwards compatibility.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def get_block_name(self): - return "state" - - -@document("click", "style") -class Button(Clickable, IOComponent, SimpleSerializable): - """ - Used to create a button, that can be assigned arbitrary click() events. The label (value) of the button can be used as an input or set via the output of a function. - - Preprocessing: passes the button value as a {str} into the function - Postprocessing: expects a {str} to be returned from a function, which is set as the label of the button - Demos: blocks_inputs, blocks_kinematics - """ - - def __init__( - self, - value: str | Callable = "Run", - *, - variant: str = "secondary", - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component. - variant: 'primary' for main call-to-action, 'secondary' for a more subdued style - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
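# Editorial sketch, not from the original file: wiring a Button click event inside Blocks,
# assuming `import gradio as gr`; `greet` is a placeholder function.
import gradio as gr

def greet(name):
    return f"Hello {name}!"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    output = gr.Textbox(label="Greeting")
    gr.Button("Run", variant="primary").click(greet, inputs=name, outputs=output)
# demo.launch()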
- """ - IOComponent.__init__( - self, visible=visible, elem_id=elem_id, value=value, **kwargs - ) - self.variant = variant - - def get_config(self): - return { - "value": self.value, - "variant": self.variant, - **Component.get_config(self), - } - - @staticmethod - def update( - value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - variant: str | None = None, - visible: bool | None = None, - ): - return { - "variant": variant, - "visible": visible, - "value": value, - "__type__": "update", - } - - def style(self, *, full_width: bool | None = None, **kwargs): - """ - This method can be used to change the appearance of the button component. - Parameters: - full_width: If True, will expand to fill parent container. - """ - if full_width is not None: - self._style["full_width"] = full_width - - return Component.style(self, **kwargs) - - -@document("click", "upload", "style") -class UploadButton( - Clickable, Uploadable, IOComponent, FileSerializable, TempFileManager -): - """ - Used to create an upload button, when cicked allows a user to upload files that satisfy the specified file type or generic files (if file_type not set). - Preprocessing: passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List{bytes}} depending on `type`) - Postprocessing: expects function to return a {str} path to a file, or {List[str]} consisting of paths to files. - Examples-format: a {str} path to a local file that populates the component. - Demos: upload_button - """ - - def __init__( - self, - label: str = "Upload a File", - value: str | List[str] | Callable | None = None, - *, - visible: bool = True, - elem_id: str | None = None, - type: str = "file", - file_count: str = "single", - file_types: List[str] | None = None, - **kwargs, - ): - """ - Parameters: - value: Default text for the button to display. - type: Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name and original filename can be retrieved with file_obj.orig_name, "binary" returns an bytes object. - file_count: if single, allows user to upload one file. If "multiple", user uploads multiple files. If "directory", user uploads all files in selected directory. Return type will be list for each file in case of "multiple" or "directory". - file_types: List of type of files to be uploaded. "file" allows any file to be uploaded, "image" allows only image files to be uploaded, "audio" allows only audio files to be uploaded, "video" allows only video files to be uploaded, "text" allows only text files to be uploaded. - label: Text to display on the button. Defaults to "Upload a File". - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - self.type = type - self.file_count = file_count - self.file_types = file_types - self.label = label - TempFileManager.__init__(self) - IOComponent.__init__( - self, label=label, visible=visible, elem_id=elem_id, value=value, **kwargs - ) - - def get_config(self): - return { - "label": self.label, - "value": self.value, - "file_count": self.file_count, - "file_types": self.file_types, - **Component.get_config(self), - } - - @staticmethod - def update( - value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - interactive: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "interactive": interactive, - "visible": visible, - "value": value, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def preprocess( - self, x: List[Dict[str, Any]] | None - ) -> bytes | tempfile._TemporaryFileWrapper | List[ - bytes | tempfile._TemporaryFileWrapper - ] | None: - """ - Parameters: - x: List of JSON objects with filename as 'name' property and base64 data as 'data' property - Returns: - File objects in requested format - """ - if x is None: - return None - - def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper: - file_name, data, is_file = ( - f["name"], - f["data"], - f.get("is_file", False), - ) - if self.type == "file": - if is_file: - temp_file_path = self.make_temp_copy_if_needed(file_name) - file = tempfile.NamedTemporaryFile(delete=False) - file.name = temp_file_path - file.orig_name = file_name # type: ignore - else: - file = processing_utils.decode_base64_to_file( - data, file_path=file_name - ) - file.orig_name = file_name # type: ignore - return file - elif self.type == "bytes": - if is_file: - with open(file_name, "rb") as file_data: - return file_data.read() - return processing_utils.decode_base64_to_binary(data)[0] - else: - raise ValueError( - "Unknown type: " - + str(self.type) - + ". Please choose from: 'file', 'bytes'." - ) - - if self.file_count == "single": - if isinstance(x, list): - return process_single_file(x[0]) - else: - return process_single_file(x) - else: - if isinstance(x, list): - return [process_single_file(f) for f in x] - else: - return process_single_file(x) - - def generate_sample(self): - return deepcopy(media_data.BASE64_FILE) - - def serialize( - self, x: str | None, load_dir: str = "", encryption_key: bytes | None = None - ) -> Dict | None: - serialized = FileSerializable.serialize(self, x, load_dir, encryption_key) - if serialized is None: - return None - serialized["size"] = Path(serialized["name"]).stat().st_size - return serialized - - def style(self, *, full_width: bool | None = None, **kwargs): - """ - This method can be used to change the appearance of the button component. - Parameters: - full_width: If True, will expand to fill parent container. - """ - if full_width is not None: - self._style["full_width"] = full_width - - return Component.style(self, **kwargs) - - -@document("change", "submit", "style") -class ColorPicker(Changeable, Submittable, IOComponent, SimpleSerializable): - """ - Creates a color picker for user to select a color as string input. - Preprocessing: passes selected color value as a {str} into the function. - Postprocessing: expects a {str} returned from function and sets color picker value to it. - Examples-format: a {str} with a hexadecimal representation of a color, e.g. "#ff0000" for red. 
- Demos: color_picker, color_generator - """ - - def __init__( - self, - value: str | Callable | None = None, - *, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - interactive: bool | None = None, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: default text to provide in color picker. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.cleared_value = "#000000" - self.test_input = value - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - interactive=interactive, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - interactive: bool | None = None, - ): - updated_config = { - "value": value, - "label": label, - "show_label": show_label, - "visible": visible, - "__type__": "update", - } - return IOComponent.add_interactive_to_config(updated_config, interactive) - - def preprocess(self, x: str | None) -> str | None: - """ - Any preprocessing needed to be performed on function input. - Parameters: - x: text - Returns: - text - """ - if x is None: - return None - else: - return str(x) - - def generate_sample(self) -> str: - return "#000000" - - def postprocess(self, y: str | None) -> str | None: - """ - Any postprocessing needed to be performed on function output. - Parameters: - y: text - Returns: - text - """ - if y is None: - return None - else: - return str(y) - - -############################ -# Only Output Components -############################ - - -@document("change", "style") -class Label(Changeable, IOComponent, JSONSerializable): - """ - Displays a classification label, along with confidence scores of top categories, if provided. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a {Dict[str, float]} of classes and confidences, or {str} with just the class or an {int}/{float} for regression outputs, or a {str} path to a .json file containing a json dictionary in the structure produced by Label.postprocess(). 
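# Editorial sketch, not from the original file: a classifier-style function returning a
# {label: confidence} dict, rendered by the Label component described above.
import gradio as gr

def classify(text):
    # Placeholder scores; a real model would compute these from `text`.
    return {"positive": 0.7, "negative": 0.2, "neutral": 0.1}

demo = gr.Interface(classify, gr.Textbox(), gr.Label(num_top_classes=2))
# demo.launch()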
- - Demos: main_note, titanic_survival - Guides: Gradio_and_ONNX_on_Hugging_Face, image_classification_in_pytorch, image_classification_in_tensorflow, image_classification_with_vision_transformers, building_a_pictionary_app - """ - - CONFIDENCES_KEY = "confidences" - - def __init__( - self, - value: Dict[str, float] | str | float | Callable | None = None, - *, - num_top_classes: int | None = None, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - color: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Default value to show in the component. If a str or number is provided, simply displays the string or number. If a {Dict[str, float]} of classes and confidences is provided, displays the top class on top and the `num_top_classes` below, along with their confidence bars. If callable, the function will be called whenever the app loads to set the initial value of the component. - num_top_classes: number of most confident classes to show. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - color: The background color of the label (either a valid css color name or hexadecimal string). - """ - self.num_top_classes = num_top_classes - self.color = color - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "num_top_classes": self.num_top_classes, - "value": self.value, - "color": self.color, - **IOComponent.get_config(self), - } - - def postprocess(self, y: Dict[str, float] | str | float | None) -> Dict | None: - """ - Parameters: - y: a dictionary mapping labels to confidence value, or just a string/numerical label by itself - Returns: - Object with key 'label' representing primary label, and key 'confidences' representing a list of label-confidence pairs - """ - if y is None or y == {}: - return None - if isinstance(y, str) and y.endswith(".json") and Path(y).exists(): - return self.serialize(y) - if isinstance(y, (str, float, int)): - return {"label": str(y)} - if isinstance(y, dict): - if "confidences" in y and isinstance(y["confidences"], dict): - y = y["confidences"] - y = {c["label"]: c["confidence"] for c in y} - sorted_pred = sorted(y.items(), key=operator.itemgetter(1), reverse=True) - if self.num_top_classes is not None: - sorted_pred = sorted_pred[: self.num_top_classes] - return { - "label": sorted_pred[0][0], - "confidences": [ - {"label": pred[0], "confidence": pred[1]} for pred in sorted_pred - ], - } - raise ValueError( - "The `Label` output interface expects one of: a string label, or an int label, a " - "float label, or a dictionary whose keys are labels and values are confidences. 
" - "Instead, got a {}".format(type(y)) - ) - - @staticmethod - def update( - value: Dict[str, float] - | str - | float - | Literal[_Keywords.NO_VALUE] - | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - color: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - ): - # If color is not specified (NO_VALUE) map it to None so that - # it gets filtered out in postprocess. This will mean the color - # will not be updated in the front-end - if color is _Keywords.NO_VALUE: - color = None - # If the color was specified by the developer as None - # Map is so that the color is updated to be transparent, - # e.g. no background default state. - elif color is None: - color = "transparent" - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "color": color, - "__type__": "update", - } - return updated_config - - def style( - self, - *, - container: bool | None = None, - ): - """ - This method can be used to change the appearance of the label component. - Parameters: - container: If True, will add a container to the label - providing some extra padding around the border. - """ - return Component.style(self, container=container) - - -@document("change", "style") -class HighlightedText(Changeable, IOComponent, JSONSerializable): - """ - Displays text that contains spans that are highlighted by category or numerical value. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a {List[Tuple[str, float | str]]]} consisting of spans of text and their associated labels, or a {Dict} with two keys: (1) "text" whose value is the complete text, and "entities", which is a list of dictionaries, each of which have the keys: "entity" (consisting of the entity label), "start" (the character index where the label starts), and "end" (the character index where the label ends). Entities should not overlap. - - Demos: diff_texts, text_analysis - Guides: named_entity_recognition - """ - - def __init__( - self, - value: List[Tuple[str, str | float | None]] | Dict | Callable | None = None, - *, - color_map: Dict[str, str] - | None = None, # Parameter moved to HighlightedText.style() - show_legend: bool = False, - combine_adjacent: bool = False, - adjacent_separator: str = "", - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Default value to show. If callable, the function will be called whenever the app loads to set the initial value of the component. - show_legend: whether to show span categories in a separate legend or inline. - combine_adjacent: If True, will merge the labels of adjacent tokens belonging to the same category. - adjacent_separator: Specifies the separator to be used between tokens if combine_adjacent is True. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - self.color_map = color_map - if color_map is not None: - warnings.warn( - "The 'color_map' parameter has been moved from the constructor to `HighlightedText.style()` ", - ) - self.show_legend = show_legend - self.combine_adjacent = combine_adjacent - self.adjacent_separator = adjacent_separator - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "color_map": self.color_map, - "show_legend": self.show_legend, - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: List[Tuple[str, str | float | None]] - | Dict - | Literal[_Keywords.NO_VALUE] - | None, - color_map: Dict[str, str] | None = None, - show_legend: bool | None = None, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "color_map": color_map, - "show_legend": show_legend, - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def postprocess( - self, y: List[Tuple[str, str | float | None]] | Dict | None - ) -> List[Tuple[str, str | float | None]] | None: - """ - Parameters: - y: List of (word, category) tuples - Returns: - List of (word, category) tuples - """ - if y is None: - return None - if isinstance(y, dict): - try: - text = y["text"] - entities = y["entities"] - except KeyError: - raise ValueError( - "Expected a dictionary with keys 'text' and 'entities' for the value of the HighlightedText component." - ) - if len(entities) == 0: - y = [(text, None)] - else: - list_format = [] - index = 0 - entities = sorted(entities, key=lambda x: x["start"]) - for entity in entities: - list_format.append((text[index : entity["start"]], None)) - list_format.append( - (text[entity["start"] : entity["end"]], entity["entity"]) - ) - index = entity["end"] - list_format.append((text[index:], None)) - y = list_format - if self.combine_adjacent: - output = [] - running_text, running_category = None, None - for text, category in y: - if running_text is None: - running_text = text - running_category = category - elif category == running_category: - running_text += self.adjacent_separator + text - elif not text: - # Skip fully empty item, these get added in processing - # of dictionaries. - pass - else: - output.append((running_text, running_category)) - running_text = text - running_category = category - if running_text is not None: - output.append((running_text, running_category)) - return output - else: - return y - - def style( - self, - *, - color_map: Dict[str, str] | None = None, - container: bool | None = None, - **kwargs, - ): - """ - This method can be used to change the appearance of the HighlightedText component. - Parameters: - color_map: Map between category and respective colors. - container: If True, will place the component in a container - providing some extra padding around the border. - """ - if color_map is not None: - self._style["color_map"] = color_map - - return Component.style(self, container=container, **kwargs) - - -@document("change", "style") -class JSON(Changeable, IOComponent, JSONSerializable): - """ - Used to display arbitrary JSON output prettily. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a valid JSON {str} -- or a {list} or {dict} that is JSON serializable. 
- - Demos: zip_to_json, blocks_xray - """ - - def __init__( - self, - value: str | Callable | None = None, - *, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Default value. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - interactive: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def postprocess(self, y: Dict | List | str | None) -> Dict | List | None: - """ - Parameters: - y: JSON output - Returns: - JSON output - """ - if y is None: - return None - if isinstance(y, str): - return json.loads(y) - else: - return y - - def style(self, *, container: bool | None = None, **kwargs): - """ - This method can be used to change the appearance of the JSON component. - Parameters: - container: If True, will place the JSON in a container - providing some extra padding around the border. - """ - return Component.style(self, container=container, **kwargs) - - -@document("change") -class HTML(Changeable, IOComponent, SimpleSerializable): - """ - Used to display arbitrary HTML output. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a valid HTML {str}. - - Demos: text_analysis - Guides: key_features - """ - - def __init__( - self, - value: str | Callable = "", - *, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Default value. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def style(self): - return self - - -@document("style") -class Gallery(IOComponent, TempFileManager, FileSerializable): - """ - Used to display a list of images as a gallery that can be scrolled through. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a list of images in any format, {List[numpy.array | PIL.Image | str]}, or a {List} of (image, {str} caption) tuples and displays them. - - Demos: fake_gan - """ - - def __init__( - self, - value: List[np.ndarray | _Image.Image | str] | Callable | None = None, - *, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: List of images to display in the gallery by default. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - TempFileManager.__init__(self) - super().__init__( - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def get_config(self): - return { - "value": self.value, - **IOComponent.get_config(self), - } - - def postprocess( - self, - y: List[np.ndarray | _Image.Image | str] - | List[Tuple[np.ndarray | _Image.Image | str, str]] - | None, - ) -> List[str]: - """ - Parameters: - y: list of images, or list of (image, caption) tuples - Returns: - list of string file paths to images in temp directory - """ - if y is None: - return [] - output = [] - for img in y: - caption = None - if isinstance(img, tuple) or isinstance(img, list): - img, caption = img - if isinstance(img, np.ndarray): - file = processing_utils.save_array_to_file(img) - file_path = str(Path(file.name).resolve()) - self.temp_files.add(file_path) - elif isinstance(img, _Image.Image): - file = processing_utils.save_pil_to_file(img) - file_path = str(Path(file.name).resolve()) - self.temp_files.add(file_path) - elif isinstance(img, str): - if utils.validate_url(img): - file_path = img - else: - file_path = self.make_temp_copy_if_needed(img) - else: - raise ValueError(f"Cannot process type as image: {type(img)}") - - if caption is not None: - output.append( - [{"name": file_path, "data": None, "is_file": True}, caption] - ) - else: - output.append({"name": file_path, "data": None, "is_file": True}) - - return output - - def style( - self, - *, - grid: int | Tuple | None = None, - height: str | None = None, - container: bool | None = None, - **kwargs, - ): - """ - This method can be used to change the appearance of the gallery component. - Parameters: - grid: Represents the number of images that should be shown in one row, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). if fewer that 6 are given then the last will be used for all subsequent breakpoints - height: Height of the gallery. - container: If True, will place gallery in a container - providing some extra padding around the border. 
- """ - if grid is not None: - self._style["grid"] = grid - if height is not None: - self._style["height"] = height - - return Component.style(self, container=container, **kwargs) - - def deserialize( - self, x: Any, save_dir: str = "", encryption_key: bytes | None = None - ) -> None | str: - if x is None: - return None - gallery_path = Path(save_dir) / str(uuid.uuid4()) - gallery_path.mkdir(exist_ok=True, parents=True) - captions = {} - for img_data in x: - if isinstance(img_data, list) or isinstance(img_data, tuple): - img_data, caption = img_data - else: - caption = None - name = FileSerializable.deserialize(self, img_data, gallery_path) - captions[name] = caption - captions_file = gallery_path / "captions.json" - with captions_file.open("w") as captions_json: - json.dump(captions, captions_json) - return str(gallery_path.resolve()) - - def serialize(self, x: Any, load_dir: str = "", called_directly: bool = False): - files = [] - captions_file = Path(x) / "captions.json" - with captions_file.open("r") as captions_json: - captions = json.load(captions_json) - for file_name, caption in captions.items(): - img = FileSerializable.serialize(self, file_name) - files.append([img, caption]) - return files - - -class Carousel(IOComponent, Changeable, SimpleSerializable): - """ - Deprecated Component - """ - - def __init__( - self, - *args, - **kwargs, - ): - raise DeprecationWarning( - "The Carousel component is deprecated. Please consider using the Gallery " - "component, which can be used to display images (and optional captions).", - ) - - -@document("change", "style") -class Chatbot(Changeable, IOComponent, JSONSerializable): - """ - Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a {List[Tuple[str, str]]}, a list of tuples with user inputs and responses as strings of HTML. - - Demos: chatbot_demo - """ - - def __init__( - self, - value: List[Tuple[str, str]] | Callable | None = None, - color_map: Dict[str, str] | None = None, # Parameter moved to Chatbot.style() - *, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - if color_map is not None: - warnings.warn( - "The 'color_map' parameter has been moved from the constructor to `Chatbot.style()` ", - ) - self.color_map = color_map - self.md = MarkdownIt() - - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "value": self.value, - "color_map": self.color_map, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - color_map: Tuple[str, str] | None = None, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "color_map": color_map, - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def postprocess(self, y: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - """ - Parameters: - y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. - Returns: - List of tuples representing the message and response. Each message and response will be a string of HTML. - """ - if y is None: - return [] - for i, (message, response) in enumerate(y): - y[i] = (self.md.render(message), self.md.render(response)) - return y - - def style(self, *, color_map: Tuple[str, str] | None = None, **kwargs): - """ - This method can be used to change the appearance of the Chatbot component. - Parameters: - color_map: Tuple containing colors to apply to user and response chat bubbles. - Returns: - - """ - if color_map is not None: - self._style["color_map"] = color_map - - return Component.style( - self, - **kwargs, - ) - - -@document("change", "edit", "clear", "style") -class Model3D( - Changeable, Editable, Clearable, IOComponent, FileSerializable, TempFileManager -): - """ - Component allows users to upload or view 3D Model files (.obj, .glb, or .gltf). - Preprocessing: This component passes the uploaded file as a {str} filepath. - Postprocessing: expects function to return a {str} path to a file of type (.obj, glb, or .gltf) - - Demos: model3D - Guides: how_to_use_3D_model_component - """ - - def __init__( - self, - value: str | Callable | None = None, - *, - clear_color: List[float] | None = None, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: path to (.obj, glb, or .gltf) file to show in model3D viewer. If callable, the function will be called whenever the app loads to set the initial value of the component. - clear_color: background color of scene - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - self.clear_color = clear_color or [0.2, 0.2, 0.2, 1.0] - TempFileManager.__init__(self) - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "clearColor": self.clear_color, - "value": self.value, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def preprocess(self, x: Dict[str, str] | None) -> str | None: - """ - Parameters: - x: JSON object with filename as 'name' property and base64 data as 'data' property - Returns: - string file path to temporary file with the 3D image model - """ - if x is None: - return x - file_name, file_data, is_file = ( - x["name"], - x["data"], - x.get("is_file", False), - ) - if is_file: - temp_file_path = self.make_temp_copy_if_needed(file_name) - else: - temp_file = processing_utils.decode_base64_to_file( - file_data, file_path=file_name - ) - temp_file_path = temp_file.name - - return temp_file_path - - def generate_sample(self): - return media_data.BASE64_MODEL3D - - def postprocess(self, y: str | None) -> Dict[str, str] | None: - """ - Parameters: - y: path to the model - Returns: - file name mapped to base64 url data - """ - if y is None: - return y - data = { - "name": self.make_temp_copy_if_needed(y), - "data": None, - "is_file": True, - } - return data - - def style(self, **kwargs): - """ - This method can be used to change the appearance of the Model3D component. - """ - return Component.style( - self, - **kwargs, - ) - - def as_example(self, input_data: str | None) -> str: - return Path(input_data).name if input_data else "" - - -@document("change", "clear") -class Plot(Changeable, Clearable, IOComponent, JSONSerializable): - """ - Used to display various kinds of plots (matplotlib, plotly, or bokeh are supported) - Preprocessing: this component does *not* accept input. - Postprocessing: expects either a {matplotlib.figure.Figure}, a {plotly.graph_objects._figure.Figure}, or a {dict} corresponding to a bokeh plot (json_item format) - - Demos: altair_plot, outbreak_forecast, blocks_kinematics, stock_forecast, map_airbnb - Guides: plot_component_for_maps - """ - - def __init__( - self, - value: Callable | None | pd.DataFrame = None, - *, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Optionally, supply a default plot object to display, must be a matplotlib, plotly, altair, or bokeh figure, or a callable. If callable, the function will be called whenever the app loads to set the initial value of the component. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - value=value, - **kwargs, - ) - - def get_config(self): - return {"value": self.value, **IOComponent.get_config(self)} - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - ): - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def postprocess(self, y) -> Dict[str, str] | None: - """ - Parameters: - y: plot data - Returns: - plot type mapped to plot base64 data - """ - if y is None: - return None - if isinstance(y, (ModuleType, matplotlib.figure.Figure)): - dtype = "matplotlib" - out_y = processing_utils.encode_plot_to_base64(y) - elif isinstance(y, dict): - dtype = "bokeh" - out_y = json.dumps(y) - else: - is_altair = "altair" in y.__module__ - if is_altair: - dtype = "altair" - else: - dtype = "plotly" - out_y = y.to_json() - return {"type": dtype, "plot": out_y} - - def style(self, container: bool | None = None): - return Component.style( - self, - container=container, - ) - - -class AltairPlot: - @staticmethod - def create_legend(position, title): - if position == "none": - legend = None - else: - position = {"orient": position} if position else {} - legend = {"title": title, **position} - - return legend - - @staticmethod - def create_scale(limit): - return alt.Scale(domain=limit) if limit else alt.Undefined - - -@document("change", "clear") -class ScatterPlot(Plot): - """ - Create a scatter plot. - - Preprocessing: this component does *not* accept input. - Postprocessing: expects a pandas dataframe with the data to plot. - - Demos: native_plots - Guides: creating_a_dashboard_from_bigquery_data - """ - - def __init__( - self, - value: pd.DataFrame | Callable | None = None, - x: str | None = None, - y: str | None = None, - *, - color: str | None = None, - size: str | None = None, - shape: str | None = None, - title: str | None = None, - tooltip: List[str] | str | None = None, - x_title: str | None = None, - y_title: str | None = None, - color_legend_title: str | None = None, - size_legend_title: str | None = None, - shape_legend_title: str | None = None, - color_legend_position: str | None = None, - size_legend_position: str | None = None, - shape_legend_position: str | None = None, - height: int | None = None, - width: int | None = None, - x_lim: List[int | float] | None = None, - y_lim: List[int | float] | None = None, - caption: str | None = None, - interactive: bool | None = True, - label: str | None = None, - every: float | None = None, - show_label: bool = True, - visible: bool = True, - elem_id: str | None = None, - ): - """ - Parameters: - value: The pandas dataframe containing the data to display in a scatter plot, or a callable. If callable, the function will be called whenever the app loads to set the initial value of the component. - x: Column corresponding to the x axis. - y: Column corresponding to the y axis. - color: The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark values. - size: The column used to determine the point size. Should contain numeric data so that gradio can map the data to the point size. 
- shape: The column used to determine the point shape. Should contain categorical data. Gradio will map each unique value to a different shape. - title: The title to display on top of the chart. - tooltip: The column (or list of columns) to display on the tooltip when a user hovers a point on the plot. - x_title: The title given to the x axis. By default, uses the value of the x parameter. - y_title: The title given to the y axis. By default, uses the value of the y parameter. - color_legend_title: The title given to the color legend. By default, uses the value of color parameter. - size_legend_title: The title given to the size legend. By default, uses the value of the size parameter. - shape_legend_title: The title given to the shape legend. By default, uses the value of the shape parameter. - color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation. - size_legend_position: The position of the size legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation. - shape_legend_position: The position of the shape legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation. - height: The height of the plot in pixels. - width: The width of the plot in pixels. - x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max]. - y_lim: A tuple or list containing the limits for the y-axis, specified as [y_min, y_max]. - caption: The (optional) caption to display below the plot. - interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad. - label: The (optional) label to display on the top left corner of the plot. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: Whether the label should be displayed. - visible: Whether the plot should be visible. - elem_id: Unique id used for custom css targeting.
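# --- [Editor's note] Illustrative sketch, not part of the original file: a minimal static
# ScatterPlot built from the parameters documented above. The dataframe contents are made up.
import pandas as pd
import gradio as gr

df = pd.DataFrame(
    {"hp": [110, 93, 175, 105], "mpg": [21.0, 22.8, 18.7, 24.1], "cyl": ["6", "4", "8", "4"]}
)

with gr.Blocks() as demo:
    gr.ScatterPlot(
        value=df, x="hp", y="mpg", color="cyl",
        tooltip=["hp", "mpg"], title="Horsepower vs. MPG",
    )
# --- end of editor's sketch; the original diff continues below.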
- """ - self.x = x - self.y = y - self.color = color - self.size = size - self.shape = shape - self.tooltip = tooltip - self.title = title - self.x_title = x_title - self.y_title = y_title - self.color_legend_title = color_legend_title - self.color_legend_position = color_legend_position - self.size_legend_title = size_legend_title - self.size_legend_position = size_legend_position - self.shape_legend_title = shape_legend_title - self.shape_legend_position = shape_legend_position - self.caption = caption - self.interactive_chart = interactive - self.width = width - self.height = height - self.x_lim = x_lim - self.y_lim = y_lim - super().__init__( - value=value, - label=label, - every=every, - show_label=show_label, - visible=visible, - elem_id=elem_id, - ) - - def get_config(self): - config = super().get_config() - config["caption"] = self.caption - return config - - def get_block_name(self) -> str: - return "plot" - - @staticmethod - def update( - value: DataFrame | Dict | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE, - x: str | None = None, - y: str | None = None, - color: str | None = None, - size: str | None = None, - shape: str | None = None, - title: str | None = None, - tooltip: List[str] | str | None = None, - x_title: str | None = None, - y_title: str | None = None, - color_legend_title: str | None = None, - size_legend_title: str | None = None, - shape_legend_title: str | None = None, - color_legend_position: str | None = None, - size_legend_position: str | None = None, - shape_legend_position: str | None = None, - height: int | None = None, - width: int | None = None, - x_lim: List[int | float] | None = None, - y_lim: List[int | float] | None = None, - interactive: bool | None = None, - caption: str | None = None, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - ): - """Update an existing plot component. - - If updating any of the plot properties (color, size, etc) the value, x, and y parameters must be specified. - - Parameters: - value: The pandas dataframe containing the data to display in a scatter plot. - x: Column corresponding to the x axis. - y: Column corresponding to the y axis. - color: The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark values. - size: The column used to determine the point size. Should contain numeric data so that gradio can map the data to the point size. - shape: The column used to determine the point shape. Should contain categorical data. Gradio will map each unique value to a different shape. - title: The title to display on top of the chart. - tooltip: The column (or list of columns) to display on the tooltip when a user hovers a point on the plot. - x_title: The title given to the x axis. By default, uses the value of the x parameter. - y_title: The title given to the y axis. By default, uses the value of the y parameter. - color_legend_title: The title given to the color legend. By default, uses the value of color parameter. - size_legend_title: The title given to the size legend. By default, uses the value of the size parameter. - shape_legend_title: The title given to the shape legend. By default, uses the value of the shape parameter. - color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. 
For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation. - size_legend_position: The position of the size legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation. - shape_legend_position: The position of the shape legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation. - height: The height of the plot in pixels. - width: The width of the plot in pixels. - x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max]. - y_lim: A tuple of list containing the limits for the y-axis, specified as [y_min, y_max]. - interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad. - caption: The (optional) caption to display below the plot. - label: The (optional) label to display in the top left corner of the plot. - show_label: Whether the label should be displayed. - visible: Whether the plot should be visible. - """ - properties = [ - x, - y, - color, - size, - shape, - title, - tooltip, - x_title, - y_title, - color_legend_title, - size_legend_title, - shape_legend_title, - color_legend_position, - size_legend_position, - shape_legend_position, - interactive, - height, - width, - x_lim, - y_lim, - ] - if any(properties): - if not isinstance(value, pd.DataFrame): - raise ValueError( - "In order to update plot properties the value parameter " - "must be provided, and it must be a Dataframe. Please pass a value " - "parameter to gr.ScatterPlot.update." - ) - if x is None or y is None: - raise ValueError( - "In order to update plot properties, the x and y axis data " - "must be specified. Please pass valid values for x an y to " - "gr.ScatterPlot.update." 
- ) - chart = ScatterPlot.create_plot(value, *properties) - value = {"type": "altair", "plot": chart.to_json(), "chart": "scatter"} - - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "caption": caption, - "__type__": "update", - } - return updated_config - - @staticmethod - def create_plot( - value: pd.DataFrame, - x: str, - y: str, - color: str | None = None, - size: str | None = None, - shape: str | None = None, - title: str | None = None, - tooltip: List[str] | str | None = None, - x_title: str | None = None, - y_title: str | None = None, - color_legend_title: str | None = None, - size_legend_title: str | None = None, - shape_legend_title: str | None = None, - color_legend_position: str | None = None, - size_legend_position: str | None = None, - shape_legend_position: str | None = None, - height: int | None = None, - width: int | None = None, - x_lim: List[int | float] | None = None, - y_lim: List[int | float] | None = None, - interactive: bool | None = True, - ): - """Helper for creating the scatter plot.""" - interactive = True if interactive is None else interactive - encodings = dict( - x=alt.X( - x, # type: ignore - title=x_title or x, # type: ignore - scale=AltairPlot.create_scale(x_lim), # type: ignore - ), # ignore: type - y=alt.Y( - y, # type: ignore - title=y_title or y, # type: ignore - scale=AltairPlot.create_scale(y_lim), # type: ignore - ), - ) - properties = {} - if title: - properties["title"] = title - if height: - properties["height"] = height - if width: - properties["width"] = width - if color: - if is_numeric_dtype(value[color]): - domain = [value[color].min(), value[color].max()] - range_ = [0, 1] - type_ = "quantitative" - else: - domain = value[color].unique().tolist() - range_ = list(range(len(domain))) - type_ = "nominal" - - encodings["color"] = { - "field": color, - "type": type_, - "legend": AltairPlot.create_legend( - position=color_legend_position, title=color_legend_title or color - ), - "scale": {"domain": domain, "range": range_}, - } - if tooltip: - encodings["tooltip"] = tooltip - if size: - encodings["size"] = { - "field": size, - "type": "quantitative" if is_numeric_dtype(value[size]) else "nominal", - "legend": AltairPlot.create_legend( - position=size_legend_position, title=size_legend_title or size - ), - } - if shape: - encodings["shape"] = { - "field": shape, - "type": "quantitative" if is_numeric_dtype(value[shape]) else "nominal", - "legend": AltairPlot.create_legend( - position=shape_legend_position, title=shape_legend_title or shape - ), - } - chart = ( - alt.Chart(value) # type: ignore - .mark_point(clip=True) # type: ignore - .encode(**encodings) - .properties(background="transparent", **properties) - ) - if interactive: - chart = chart.interactive() - - return chart - - def postprocess(self, y: pd.DataFrame | Dict | None) -> Dict[str, str] | None: - # if None or update - if y is None or isinstance(y, Dict): - return y - if self.x is None or self.y is None: - raise ValueError("No value provided for required parameters `x` and `y`.") - chart = self.create_plot( - value=y, - x=self.x, - y=self.y, - color=self.color, - size=self.size, - shape=self.shape, - title=self.title, - tooltip=self.tooltip, - x_title=self.x_title, - y_title=self.y_title, - color_legend_title=self.color_legend_title, - size_legend_title=self.size_legend_title, - shape_legend_title=self.size_legend_title, - color_legend_position=self.color_legend_position, - size_legend_position=self.size_legend_position, - 
shape_legend_position=self.shape_legend_position, - interactive=self.interactive_chart, - height=self.height, - width=self.width, - x_lim=self.x_lim, - y_lim=self.y_lim, - ) - - return {"type": "altair", "plot": chart.to_json(), "chart": "scatter"} - - -@document("change", "clear") -class LinePlot(Plot): - """ - Create a line plot. - - Preprocessing: this component does *not* accept input. - Postprocessing: expects a pandas dataframe with the data to plot. - - Demos: native_plots, live_dashboard - """ - - def __init__( - self, - value: pd.DataFrame | Callable | None = None, - x: str | None = None, - y: str | None = None, - *, - color: str | None = None, - stroke_dash: str | None = None, - overlay_point: bool | None = None, - title: str | None = None, - tooltip: List[str] | str | None = None, - x_title: str | None = None, - y_title: str | None = None, - color_legend_title: str | None = None, - stroke_dash_legend_title: str | None = None, - color_legend_position: str | None = None, - stroke_dash_legend_position: str | None = None, - height: int | None = None, - width: int | None = None, - x_lim: List[int] | None = None, - y_lim: List[int] | None = None, - caption: str | None = None, - interactive: bool | None = True, - label: str | None = None, - show_label: bool = True, - every: float | None = None, - visible: bool = True, - elem_id: str | None = None, - ): - """ - Parameters: - value: The pandas dataframe containing the data to display in a scatter plot. - x: Column corresponding to the x axis. - y: Column corresponding to the y axis. - color: The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark values. - stroke_dash: The column to determine the symbol used to draw the line, e.g. dashed lines, dashed lines with points. - overlay_point: Whether to draw a point on the line for each (x, y) coordinate pair. - title: The title to display on top of the chart. - tooltip: The column (or list of columns) to display on the tooltip when a user hovers a point on the plot. - x_title: The title given to the x axis. By default, uses the value of the x parameter. - y_title: The title given to the y axis. By default, uses the value of the y parameter. - color_legend_title: The title given to the color legend. By default, uses the value of color parameter. - stroke_dash_legend_title: The title given to the stroke_dash legend. By default, uses the value of the stroke_dash parameter. - color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation. - stroke_dash_legend_position: The position of the stoke_dash legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation. - height: The height of the plot in pixels. - width: The width of the plot in pixels. - x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max]. - y_lim: A tuple of list containing the limits for the y-axis, specified as [y_min, y_max]. - caption: The (optional) caption to display below the plot. - interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad. - label: The (optional) label to display on the top left corner of the plot. 
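# --- [Editor's note] Illustrative sketch, not part of the original file: a minimal LinePlot
# using the parameters documented above (the remaining parameters continue below). The
# dataframe contents are made up.
import pandas as pd
import gradio as gr

df = pd.DataFrame(
    {"day": [1, 2, 3, 4] * 2,
     "temp": [20, 22, 19, 24, 15, 16, 14, 18],
     "city": ["A"] * 4 + ["B"] * 4}
)

with gr.Blocks() as demo:
    gr.LinePlot(value=df, x="day", y="temp", color="city", overlay_point=True, title="Temperature")
# --- end of editor's sketch; the original diff continues below.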
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: Whether the label should be displayed. - visible: Whether the plot should be visible. - elem_id: Unique id used for custom css targetting. - """ - self.x = x - self.y = y - self.color = color - self.stroke_dash = stroke_dash - self.tooltip = tooltip - self.title = title - self.x_title = x_title - self.y_title = y_title - self.color_legend_title = color_legend_title - self.stroke_dash_legend_title = stroke_dash_legend_title - self.color_legend_position = color_legend_position - self.stroke_dash_legend_position = stroke_dash_legend_position - self.overlay_point = overlay_point - self.x_lim = x_lim - self.y_lim = y_lim - self.caption = caption - self.interactive_chart = interactive - self.width = width - self.height = height - super().__init__( - value=value, - label=label, - show_label=show_label, - visible=visible, - elem_id=elem_id, - every=every, - ) - - def get_config(self): - config = super().get_config() - config["caption"] = self.caption - return config - - def get_block_name(self) -> str: - return "plot" - - @staticmethod - def update( - value: pd.DataFrame | Dict | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE, - x: str | None = None, - y: str | None = None, - color: str | None = None, - stroke_dash: str | None = None, - overlay_point: bool | None = None, - title: str | None = None, - tooltip: List[str] | str | None = None, - x_title: str | None = None, - y_title: str | None = None, - color_legend_title: str | None = None, - stroke_dash_legend_title: str | None = None, - color_legend_position: str | None = None, - stroke_dash_legend_position: str | None = None, - height: int | None = None, - width: int | None = None, - x_lim: List[int] | None = None, - y_lim: List[int] | None = None, - interactive: bool | None = None, - caption: str | None = None, - label: str | None = None, - show_label: bool | None = None, - visible: bool | None = None, - ): - """Update an existing plot component. - - If updating any of the plot properties (color, size, etc) the value, x, and y parameters must be specified. - - Parameters: - value: The pandas dataframe containing the data to display in a scatter plot. - x: Column corresponding to the x axis. - y: Column corresponding to the y axis. - color: The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark values. - stroke_dash: The column to determine the symbol used to draw the line, e.g. dashed lines, dashed lines with points. - overlay_point: Whether to draw a point on the line for each (x, y) coordinate pair. - title: The title to display on top of the chart. - tooltip: The column (or list of columns) to display on the tooltip when a user hovers a point on the plot. - x_title: The title given to the x axis. By default, uses the value of the x parameter. - y_title: The title given to the y axis. By default, uses the value of the y parameter. - color_legend_title: The title given to the color legend. By default, uses the value of color parameter. - stroke_dash_legend_title: The title given to the stroke legend. By default, uses the value of stroke parameter. - color_legend_position: The position of the color legend. 
If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation - stroke_dash_legend_position: The position of the stoke_dash legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation - height: The height of the plot in pixels. - width: The width of the plot in pixels. - x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max]. - y_lim: A tuple of list containing the limits for the y-axis, specified as [y_min, y_max]. - caption: The (optional) caption to display below the plot. - interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad. - label: The (optional) label to display in the top left corner of the plot. - show_label: Whether the label should be displayed. - visible: Whether the plot should be visible. - """ - properties = [ - x, - y, - color, - stroke_dash, - overlay_point, - title, - tooltip, - x_title, - y_title, - color_legend_title, - stroke_dash_legend_title, - color_legend_position, - stroke_dash_legend_position, - height, - width, - x_lim, - y_lim, - interactive, - ] - if any(properties): - if not isinstance(value, pd.DataFrame): - raise ValueError( - "In order to update plot properties the value parameter " - "must be provided, and it must be a Dataframe. Please pass a value " - "parameter to gr.LinePlot.update." - ) - if x is None or y is None: - raise ValueError( - "In order to update plot properties, the x and y axis data " - "must be specified. Please pass valid values for x an y to " - "gr.LinePlot.update." - ) - chart = LinePlot.create_plot(value, *properties) - value = {"type": "altair", "plot": chart.to_json(), "chart": "line"} - - updated_config = { - "label": label, - "show_label": show_label, - "visible": visible, - "value": value, - "caption": caption, - "__type__": "update", - } - return updated_config - - @staticmethod - def create_plot( - value: pd.DataFrame, - x: str, - y: str, - color: str | None = None, - stroke_dash: str | None = None, - overlay_point: bool | None = None, - title: str | None = None, - tooltip: List[str] | str | None = None, - x_title: str | None = None, - y_title: str | None = None, - color_legend_title: str | None = None, - stroke_dash_legend_title: str | None = None, - color_legend_position: str | None = None, - stroke_dash_legend_position: str | None = None, - height: int | None = None, - width: int | None = None, - x_lim: List[int] | None = None, - y_lim: List[int] | None = None, - interactive: bool | None = None, - ): - """Helper for creating the scatter plot.""" - interactive = True if interactive is None else interactive - encodings = dict( - x=alt.X( - x, # type: ignore - title=x_title or x, # type: ignore - scale=AltairPlot.create_scale(x_lim), # type: ignore - ), - y=alt.Y( - y, # type: ignore - title=y_title or y, # type: ignore - scale=AltairPlot.create_scale(y_lim), # type: ignore - ), - ) - properties = {} - if title: - properties["title"] = title - if height: - properties["height"] = height - if width: - properties["width"] = width - - if color: - domain = value[color].unique().tolist() - range_ = list(range(len(domain))) - encodings["color"] = { - "field": color, - "type": "nominal", - "scale": {"domain": domain, "range": range_}, - "legend": AltairPlot.create_legend( - position=color_legend_position, title=color_legend_title or color - ), - 
} - - highlight = None - if interactive and any([color, stroke_dash]): - highlight = alt.selection( - type="single", # type: ignore - on="mouseover", - fields=[c for c in [color, stroke_dash] if c], - nearest=True, - ) - - if stroke_dash: - stroke_dash = { - "field": stroke_dash, # type: ignore - "legend": AltairPlot.create_legend( # type: ignore - position=stroke_dash_legend_position, # type: ignore - title=stroke_dash_legend_title or stroke_dash, # type: ignore - ), # type: ignore - } # type: ignore - else: - stroke_dash = alt.value(alt.Undefined) # type: ignore - - if tooltip: - encodings["tooltip"] = tooltip - - chart = alt.Chart(value).encode(**encodings) # type: ignore - - points = chart.mark_point(clip=True).encode( - opacity=alt.value(alt.Undefined) if overlay_point else alt.value(0), - ) - lines = chart.mark_line(clip=True).encode(strokeDash=stroke_dash) - - if highlight: - points = points.add_selection(highlight) - - lines = lines.encode( - size=alt.condition(highlight, alt.value(4), alt.value(1)), - ) - - chart = (lines + points).properties(background="transparent", **properties) - if interactive: - chart = chart.interactive() - - return chart - - def postprocess(self, y: pd.DataFrame | Dict | None) -> Dict[str, str] | None: - # if None or update - if y is None or isinstance(y, Dict): - return y - if self.x is None or self.y is None: - raise ValueError("No value provided for required parameters `x` and `y`.") - chart = self.create_plot( - value=y, - x=self.x, - y=self.y, - color=self.color, - overlay_point=self.overlay_point, - title=self.title, - tooltip=self.tooltip, - x_title=self.x_title, - y_title=self.y_title, - color_legend_title=self.color_legend_title, - color_legend_position=self.color_legend_position, - stroke_dash_legend_title=self.stroke_dash_legend_title, - stroke_dash_legend_position=self.stroke_dash_legend_position, - x_lim=self.x_lim, - y_lim=self.y_lim, - stroke_dash=self.stroke_dash, - interactive=self.interactive_chart, - height=self.height, - width=self.width, - ) - - return {"type": "altair", "plot": chart.to_json(), "chart": "line"} - - -@document("change") -class Markdown(IOComponent, Changeable, SimpleSerializable): - """ - Used to render arbitrary Markdown output. Can also render latex enclosed by dollar signs. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a valid {str} that can be rendered as Markdown. - - Demos: blocks_hello, blocks_kinematics - Guides: key_features - """ - - def __init__( - self, - value: str | Callable = "", - *, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - value: Value to show in Markdown component. If callable, the function will be called whenever the app loads to set the initial value of the component. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
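# --- [Editor's note] Illustrative sketch, not part of the original file: Markdown output as
# described above, including LaTeX between dollar signs (rendered via the dollarmath plugin
# configured just below in __init__).
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown(
        r"""
        # Report
        Euler's identity, $e^{i\pi} + 1 = 0$, renders inline; tables and standard Markdown work too.
        """
    )
# --- end of editor's sketch; the original diff continues below.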
- """ - self.md = ( - MarkdownIt() - .use(dollarmath_plugin, renderer=utils.tex2svg, allow_digits=False) - .enable("table") - ) - IOComponent.__init__( - self, visible=visible, elem_id=elem_id, value=value, **kwargs - ) - - def postprocess(self, y: str | None) -> str | None: - """ - Parameters: - y: markdown representation - Returns: - HTML rendering of markdown - """ - if y is None: - return None - unindented_y = inspect.cleandoc(y) - return self.md.render(unindented_y) - - def get_config(self): - return { - "value": self.value, - **Component.get_config(self), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - visible: bool | None = None, - ): - updated_config = { - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def style(self): - return self - - def as_example(self, input_data: str | None) -> str: - postprocessed = self.postprocess(input_data) - return postprocessed if postprocessed else "" - - -############################ -# Special Components -############################ - - -@document("click", "style") -class Dataset(Clickable, Component): - """ - Used to create an output widget for showing datasets. Used to render the examples - box. - Preprocessing: passes the selected sample either as a {list} of data (if type="value") or as an {int} index (if type="index") - Postprocessing: expects a {list} of {lists} corresponding to the dataset data. - """ - - def __init__( - self, - *, - label: str | None = None, - components: List[IOComponent] | List[str], - samples: List[List[Any]] | None = None, - headers: List[str] | None = None, - type: str = "values", - samples_per_page: int = 10, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - components: Which component types to show in this dataset widget, can be passed in as a list of string names or Components instances. The following components are supported in a Dataset: Audio, Checkbox, CheckboxGroup, ColorPicker, Dataframe, Dropdown, File, HTML, Image, Markdown, Model3D, Number, Radio, Slider, Textbox, TimeSeries, Video - samples: a nested list of samples. Each sublist within the outer list represents a data sample, and each element within the sublist represents an value for each component - headers: Column headers in the Dataset widget, should be the same len as components. If not provided, inferred from component labels - type: 'values' if clicking on a sample should pass the value of the sample, or "index" if it should pass the index of the sample - samples_per_page: how many examples to show per page. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - Component.__init__(self, visible=visible, elem_id=elem_id, **kwargs) - self.components = [get_component_instance(c, render=False) for c in components] - - # Narrow type to IOComponent - assert all( - [isinstance(c, IOComponent) for c in self.components] - ), "All components in a `Dataset` must be subclasses of `IOComponent`" - self.components = [c for c in self.components if isinstance(c, IOComponent)] - - self.samples = [[]] if samples is None else samples - for example in self.samples: - for i, (component, ex) in enumerate(zip(self.components, example)): - example[i] = component.as_example(ex) - self.type = type - self.label = label - if headers is not None: - self.headers = headers - elif all([c.label is None for c in self.components]): - self.headers = [] - else: - self.headers = [c.label or "" for c in self.components] - self.samples_per_page = samples_per_page - - def get_config(self): - return { - "components": [component.get_block_name() for component in self.components], - "headers": self.headers, - "samples": self.samples, - "type": self.type, - "label": self.label, - "samples_per_page": self.samples_per_page, - **Component.get_config(self), - } - - @staticmethod - def update( - samples: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - visible: bool | None = None, - label: str | None = None, - ): - return { - "samples": samples, - "visible": visible, - "label": label, - "__type__": "update", - } - - def preprocess(self, x: Any) -> Any: - """ - Any preprocessing needed to be performed on function input. - """ - if self.type == "index": - return x - elif self.type == "values": - return self.samples[x] - - def postprocess(self, samples: List[List[Any]]) -> Dict: - return { - "samples": samples, - "__type__": "update", - } - - def style(self, **kwargs): - """ - This method can be used to change the appearance of the Dataset component. - """ - return Component.style(self, **kwargs) - - -@document() -class Interpretation(Component): - """ - Used to create an interpretation widget for a component. - Preprocessing: this component does *not* accept input. - Postprocessing: expects a {dict} with keys "original" and "interpretation". - - Guides: custom_interpretations_with_blocks - """ - - def __init__( - self, - component: Component, - *, - visible: bool = True, - elem_id: str | None = None, - **kwargs, - ): - """ - Parameters: - component: Which component to show in the interpretation widget. - visible: Whether or not the interpretation is visible. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. 
- """ - Component.__init__(self, visible=visible, elem_id=elem_id, **kwargs) - self.component = component - - def get_config(self): - return { - "component": self.component.get_block_name(), - "component_props": self.component.get_config(), - } - - @staticmethod - def update( - value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE, - visible: bool | None = None, - ): - return { - "visible": visible, - "value": value, - "__type__": "update", - } - - def style(self): - return self - - -class StatusTracker(Component): - def __init__( - self, - **kwargs, - ): - warnings.warn("The StatusTracker component is deprecated.") - - -def component(cls_name: str) -> Component: - obj = utils.component_or_layout_class(cls_name)() - if isinstance(obj, BlockContext): - raise ValueError(f"Invalid component: {obj.__class__}") - return obj - - -def get_component_instance(comp: str | dict | Component, render=True) -> Component: - if isinstance(comp, str): - component_obj = component(comp) - if not (render): - component_obj.unrender() - return component_obj - elif isinstance(comp, dict): - name = comp.pop("name") - component_cls = utils.component_or_layout_class(name) - component_obj = component_cls(**comp) - if isinstance(component_obj, BlockContext): - raise ValueError(f"Invalid component: {name}") - if not (render): - component_obj.unrender() - return component_obj - elif isinstance(comp, Component): - return comp - else: - raise ValueError( - f"Component must provided as a `str` or `dict` or `Component` but is {comp}" - ) - - -Text = Textbox -DataFrame = Dataframe -Highlightedtext = HighlightedText -Highlight = HighlightedText -Checkboxgroup = CheckboxGroup -TimeSeries = Timeseries -Json = JSON diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/events.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/events.py deleted file mode 100644 index f0692c5d5f7e8f49d18fd25a7c5f60c9c033e418..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/events.py +++ /dev/null @@ -1,723 +0,0 @@ -"""Contains all of the events that can be triggered in a gr.Blocks() app, with the exception -of the on-page-load event, which is defined in gr.Blocks().load().""" - -from __future__ import annotations - -import warnings -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Set - -from gradio.blocks import Block -from gradio.utils import get_cancel_function - -if TYPE_CHECKING: # Only import for type checking (is False at runtime). 
- from gradio.components import Component, StatusTracker - - -def set_cancel_events( - block: Block, event_name: str, cancels: None | Dict[str, Any] | List[Dict[str, Any]] -): - if cancels: - if not isinstance(cancels, list): - cancels = [cancels] - cancel_fn, fn_indices_to_cancel = get_cancel_function(cancels) - block.set_event_trigger( - event_name, - cancel_fn, - inputs=None, - outputs=None, - queue=False, - preprocess=False, - cancels=fn_indices_to_cancel, - ) - - -class EventListener(Block): - pass - - -class Changeable(EventListener): - def change( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - status_tracker: StatusTracker | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the component's input value changes (e.g. when the user types in a textbox - or uploads an image). This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. 
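# --- [Editor's note] Illustrative sketch, not part of the original file: wiring the .change
# event documented above inside a Blocks app. The uppercasing function is hypothetical.
import gradio as gr

def shout(text):
    return text.upper()

with gr.Blocks() as demo:
    source = gr.Textbox(label="Input")
    result = gr.Textbox(label="Output")
    # Re-runs `shout` whenever the input value changes.
    source.change(shout, inputs=source, outputs=result)
# --- end of editor's sketch; the original diff continues below.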
- if status_tracker: - warnings.warn( - "The 'status_tracker' parameter has been deprecated and has no effect." - ) - dep = self.set_event_trigger( - "change", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "change", cancels) - return dep - - -class Clickable(EventListener): - def click( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - status_tracker: StatusTracker | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue=None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the component (e.g. a button) is clicked. - This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. - if status_tracker: - warnings.warn( - "The 'status_tracker' parameter has been deprecated and has no effect." 
- ) - - dep = self.set_event_trigger( - "click", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "click", cancels) - return dep - - -class Submittable(EventListener): - def submit( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - status_tracker: StatusTracker | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused. - This method can be used when this component is in a Gradio Blocks. - - - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. - if status_tracker: - warnings.warn( - "The 'status_tracker' parameter has been deprecated and has no effect." 
- ) - - dep = self.set_event_trigger( - "submit", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "submit", cancels) - return dep - - -class Editable(EventListener): - def edit( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - status_tracker: StatusTracker | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the user edits the component (e.g. image) using the - built-in editor. This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. - if status_tracker: - warnings.warn( - "The 'status_tracker' parameter has been deprecated and has no effect." 
- ) - - dep = self.set_event_trigger( - "edit", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "edit", cancels) - return dep - - -class Clearable(EventListener): - def clear( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - status_tracker: StatusTracker | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the user clears the component (e.g. image or audio) - using the X button for the component. This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. - if status_tracker: - warnings.warn( - "The 'status_tracker' parameter has been deprecated and has no effect." 
-            )
-
-        dep = self.set_event_trigger(
-            "clear",
-            fn,
-            inputs,
-            outputs,
-            preprocess=preprocess,
-            postprocess=postprocess,
-            scroll_to_output=scroll_to_output,
-            show_progress=show_progress,
-            api_name=api_name,
-            js=_js,
-            queue=queue,
-            batch=batch,
-            max_batch_size=max_batch_size,
-            every=every,
-        )
-        set_cancel_events(self, "clear", cancels)
-        return dep
-
-
-class Playable(EventListener):
-    def play(
-        self,
-        fn: Callable | None,
-        inputs: Component | List[Component] | Set[Component] | None = None,
-        outputs: Component | List[Component] | None = None,
-        api_name: str | None = None,
-        status_tracker: StatusTracker | None = None,
-        scroll_to_output: bool = False,
-        show_progress: bool = True,
-        queue: bool | None = None,
-        batch: bool = False,
-        max_batch_size: int = 4,
-        preprocess: bool = True,
-        postprocess: bool = True,
-        cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
-        every: float | None = None,
-        _js: str | None = None,
-    ):
-        """
-        This event is triggered when the user plays the component (e.g. audio or video).
-        This method can be used when this component is in a Gradio Blocks.
-
-        Parameters:
-            fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
-            inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
-            outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list.
-            api_name: Defining this parameter exposes the endpoint in the api docs
-            scroll_to_output: If True, will scroll to output component on completion
-            show_progress: If True, will show progress animation while pending
-            queue: If True, will place the request on the queue, if the queue exists
-            batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
-            max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
-            preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
-            postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
-            cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
-            every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
-        """
-        # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
-        if status_tracker:
-            warnings.warn(
-                "The 'status_tracker' parameter has been deprecated and has no effect."
- ) - - dep = self.set_event_trigger( - "play", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "play", cancels) - return dep - - def pause( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - status_tracker: StatusTracker | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the user pauses the component (e.g. audio or video). - This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. - if status_tracker: - warnings.warn( - "The 'status_tracker' parameter has been deprecated and has no effect." 
- ) - - dep = self.set_event_trigger( - "pause", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "pause", cancels) - return dep - - def stop( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - status_tracker: StatusTracker | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the user stops the component (e.g. audio or video). - This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. - if status_tracker: - warnings.warn( - "The 'status_tracker' parameter has been deprecated and has no effect." 
- ) - - dep = self.set_event_trigger( - "stop", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "stop", cancels) - return dep - - -class Streamable(EventListener): - def stream( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - status_tracker: StatusTracker | None = None, - scroll_to_output: bool = False, - show_progress: bool = False, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the user streams the component (e.g. a live webcam - component). This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component. - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. - self.streaming = True - - if status_tracker: - warnings.warn( - "The 'status_tracker' parameter has been deprecated and has no effect." 
- ) - - dep = self.set_event_trigger( - "stream", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "stream", cancels) - return dep - - -class Blurrable(EventListener): - def blur( - self, - fn: Callable | None, - inputs: Component | List[Component] | Set[Component] | None = None, - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the component's is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: Callable function - inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list. - outputs: List of gradio.components to use as inputs. If the function returns no outputs, this should be an empty list. - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. 
- - self.set_event_trigger( - "blur", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "blur", cancels) - - -class Uploadable(EventListener): - def upload( - self, - fn: Callable | None, - inputs: List[Component], - outputs: Component | List[Component] | None = None, - api_name: str | None = None, - scroll_to_output: bool = False, - show_progress: bool = True, - queue: bool | None = None, - batch: bool = False, - max_batch_size: int = 4, - preprocess: bool = True, - postprocess: bool = True, - cancels: List[Dict[str, Any]] | None = None, - every: float | None = None, - _js: str | None = None, - ): - """ - This event is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks. - - Parameters: - fn: Callable function - inputs: List of inputs - outputs: List of outputs - api_name: Defining this parameter exposes the endpoint in the api docs - scroll_to_output: If True, will scroll to output component on completion - show_progress: If True, will show progress animation while pending - queue: If True, will place the request on the queue, if the queue exists - batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component. - max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True) - preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component). - postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser. - cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. - every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled. - """ - # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components. 
- - self.set_event_trigger( - "upload", - fn, - inputs, - outputs, - preprocess=preprocess, - postprocess=postprocess, - scroll_to_output=scroll_to_output, - show_progress=show_progress, - api_name=api_name, - js=_js, - queue=queue, - batch=batch, - max_batch_size=max_batch_size, - every=every, - ) - set_cancel_events(self, "upload", cancels) diff --git a/spaces/Hoshiyume/FixedStar-DebugChat/app.py b/spaces/Hoshiyume/FixedStar-DebugChat/app.py deleted file mode 100644 index f15b5354d5bcb579ff7343f25724a8c6b1515d81..0000000000000000000000000000000000000000 --- a/spaces/Hoshiyume/FixedStar-DebugChat/app.py +++ /dev/null @@ -1,76 +0,0 @@ -import gradio as gr -import itertools -from transformers import AutoTokenizer -import ctranslate2 - -generator = ctranslate2.Generator("./ct2-model" ) -tokenizer = AutoTokenizer.from_pretrained( "./models", use_fast=False) - -static_prompt="""ユーザー: We will now start chatting. If spoken to in English, answer in English; if spoken to in Japanese, answer in Japanese. Please take a deep breath and calm down and have a conversation. -システム: I'll try to keep calm and have a conversation. -ユーザー: その調子で頑張ってください。 -システム: 分かりました。 -""" -system_prompt_tokens=tokenizer.convert_ids_to_tokens(tokenizer.encode(static_prompt, add_special_tokens=False)) - -def inference_func(prompt, max_length=128, sampling_topk=40, sampling_topp=0.75, sampling_temperature=0.7, repetition_penalty=1.4): - tokens = tokenizer.convert_ids_to_tokens( tokenizer.encode(prompt, add_special_tokens=False)) - results = generator.generate_batch( - [tokens], - static_prompt=system_prompt_tokens, - max_length=max_length, - sampling_topk=sampling_topk, - sampling_topp=sampling_topp, - sampling_temperature=sampling_temperature, - repetition_penalty=repetition_penalty, - include_prompt_in_result=False, - ) - output = tokenizer.decode(results[0].sequences_ids[0]) - return output - -def make_prompt(message, chat_history, max_context_size: int = 10): - contexts = chat_history + [[message, ""]] - contexts = list(itertools.chain.from_iterable(contexts)) - if max_context_size > 0: - context_size = max_context_size - 1 - else: - context_size = 100000 - contexts = contexts[-context_size:] - prompt = [] - for idx, context in enumerate(reversed(contexts)): - if idx % 2 == 0: - prompt = [f"システム: {context}"] + prompt - else: - prompt = [f"ユーザー: {context}"] + prompt - prompt = "\n".join(prompt) - return prompt - -def interact_func(message, chat_history, max_context_size, max_length, sampling_topk, sampling_topp, sampling_temperature, repetition_penalty ): - prompt = make_prompt(message, chat_history, max_context_size) - print(f"prompt: {prompt}") - generated = inference_func(prompt, max_length, sampling_topk, sampling_topp, sampling_temperature, repetition_penalty ) - print(f"generated: {generated}") - chat_history.append((message, generated)) - return "", chat_history - -with gr.Blocks( theme="monochrome" ) as demo: - with gr.Accordion("Parameters", open=False): - # max_context_size = the number of turns * 2 - max_context_size = gr.Number(value=10, label="max_context_size", precision=0) - max_length = gr.Number(value=128, label="max_length", precision=0) - sampling_topk = gr.Slider(0, 1000, value=40, step=0.1, label="top_k") - sampling_topp = gr.Slider(0.1, 1.0, value=0.75, step=0.1, label="top_p") - sampling_temperature = gr.Slider(0.0, 10.0, value=0.7, step=0.1, label="temperature") - repetition_penalty = gr.Slider(0.0, 10.0, value=1.4, step=0.1, label="repetition_penalty") - chatbot = gr.Chatbot( show_copy_button=True, 
show_share_button="RETRY" ) - msg = gr.Textbox() - clear = gr.Button("RESET") - msg.submit( - interact_func, - [msg, chatbot, max_context_size, max_length, sampling_topk, sampling_topp, sampling_temperature, repetition_penalty], - [msg, chatbot], - ) - clear.click(lambda: None, None, chatbot, queue=False) - -if __name__ == "__main__": - demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/Hua626/QQsign/Dockerfile b/spaces/Hua626/QQsign/Dockerfile deleted file mode 100644 index 535624113f3b520e4829240a48bd3652430de828..0000000000000000000000000000000000000000 --- a/spaces/Hua626/QQsign/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM openjdk:17-slim - -# 设置时区 -ENV TZ Asia/Shanghai - -# 设置工作目录 -WORKDIR /app - -# 复制文件到工作目录 -COPY bin /app/bin -COPY lib /app/lib -COPY txlib /app/txlib - -# 设置命令 -RUN chmod -R 777 /tmp -RUN chmod -R 777 /app -RUN sed 's/"key": ".*"/"key": "'"$KEY_VALUE"'"/' txlib/$TXLIB_VERSION/config.json > /app/txlib/$TXLIB_VERSION/config.json - -# 运行 -CMD bash bin/unidbg-fetch-qsign --basePath=txlib/$TXLIB_VERSION - -# 暴露端口 -EXPOSE 7860 \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/ICML2022/OFA/fairseq/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 93c8668041f8a7af29e4c11e905d8b56b946dd51..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -name: 🚀 Feature Request -about: Submit a proposal/request for a new feature -labels: 'enhancement, help wanted, needs triage' ---- - -## 🚀 Feature Request - - -### Motivation - - - -### Pitch - - - -### Alternatives - - - -### Additional context - - diff --git a/spaces/ICML2022/OFA/fairseq/examples/bart/README.summarization.md b/spaces/ICML2022/OFA/fairseq/examples/bart/README.summarization.md deleted file mode 100644 index 8727584f2b2bdd880c6cd3abbf39b75dfbf4a67c..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/bart/README.summarization.md +++ /dev/null @@ -1,102 +0,0 @@ -# Fine-tuning BART on CNN-Dailymail summarization task - -### 1) Download the CNN and Daily Mail data and preprocess it into data files with non-tokenized cased samples. - -Follow the instructions [here](https://github.com/abisee/cnn-dailymail) to download the original CNN and Daily Mail datasets. To preprocess the data, refer to the pointers in [this issue](https://github.com/pytorch/fairseq/issues/1391) or check out the code [here](https://github.com/artmatsak/cnn-dailymail). - -Follow the instructions [here](https://github.com/EdinburghNLP/XSum) to download the original Extreme Summarization datasets, or check out the code [here](https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset), Please keep the raw dataset and make sure no tokenization nor BPE on the dataset. 
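Before moving on to BPE preprocessing, it is worth confirming that the raw files are laid out the way the later commands expect: one non-tokenized sample per line in `<split>.source` / `<split>.target` files under the task directory. The sketch below is a minimal, editor-added sanity check built around the file names used in the following steps (the presence of a `test.target` file is an assumption; adjust the split list to match your data).

```python
import os

def check_splits(task_dir="cnn_dm", splits=("train", "val", "test")):
    # Verify that each split has line-aligned .source/.target files.
    for split in splits:
        src = os.path.join(task_dir, f"{split}.source")
        tgt = os.path.join(task_dir, f"{split}.target")
        if not (os.path.isfile(src) and os.path.isfile(tgt)):
            print(f"{split}: missing {src} or {tgt}")
            continue
        with open(src, encoding="utf-8") as f_src, open(tgt, encoding="utf-8") as f_tgt:
            n_src = sum(1 for _ in f_src)
            n_tgt = sum(1 for _ in f_tgt)
        status = "OK" if n_src == n_tgt else "MISMATCH"
        print(f"{split}: {n_src} source / {n_tgt} target lines [{status}]")

if __name__ == "__main__":
    check_splits()
```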
- -### 2) BPE preprocess: - -```bash -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe' -wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt' - -TASK=cnn_dm -for SPLIT in train val -do - for LANG in source target - do - python -m examples.roberta.multiprocessing_bpe_encoder \ - --encoder-json encoder.json \ - --vocab-bpe vocab.bpe \ - --inputs "$TASK/$SPLIT.$LANG" \ - --outputs "$TASK/$SPLIT.bpe.$LANG" \ - --workers 60 \ - --keep-empty; - done -done -``` - -### 3) Binarize dataset: -```bash -fairseq-preprocess \ - --source-lang "source" \ - --target-lang "target" \ - --trainpref "${TASK}/train.bpe" \ - --validpref "${TASK}/val.bpe" \ - --destdir "${TASK}-bin/" \ - --workers 60 \ - --srcdict dict.txt \ - --tgtdict dict.txt; -``` - -### 4) Fine-tuning on CNN-DM summarization task: -Example fine-tuning CNN-DM -```bash -TOTAL_NUM_UPDATES=20000 -WARMUP_UPDATES=500 -LR=3e-05 -MAX_TOKENS=2048 -UPDATE_FREQ=4 -BART_PATH=/path/to/bart/model.pt - -CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 fairseq-train cnn_dm-bin \ - --restore-file $BART_PATH \ - --max-tokens $MAX_TOKENS \ - --task translation \ - --source-lang source --target-lang target \ - --truncate-source \ - --layernorm-embedding \ - --share-all-embeddings \ - --share-decoder-input-output-embed \ - --reset-optimizer --reset-dataloader --reset-meters \ - --required-batch-size-multiple 1 \ - --arch bart_large \ - --criterion label_smoothed_cross_entropy \ - --label-smoothing 0.1 \ - --dropout 0.1 --attention-dropout 0.1 \ - --weight-decay 0.01 --optimizer adam --adam-betas "(0.9, 0.999)" --adam-eps 1e-08 \ - --clip-norm 0.1 \ - --lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \ - --fp16 --update-freq $UPDATE_FREQ \ - --skip-invalid-size-inputs-valid-test \ - --find-unused-parameters; -``` -Above is expected to run on `1` node with `8 32gb-V100`. -Expected training time is about `5 hours`. Training time can be reduced with distributed training on `4` nodes and `--update-freq 1`. - -Use TOTAL_NUM_UPDATES=15000 UPDATE_FREQ=2 for Xsum task - -### Inference for CNN-DM test data using above trained checkpoint. 
-After training the model as mentioned in previous step, you can perform inference with checkpoints in `checkpoints/` directory using `eval_cnn.py`, for example - -```bash -cp data-bin/cnn_dm/dict.source.txt checkpoints/ -python examples/bart/summarize.py \ - --model-dir checkpoints \ - --model-file checkpoint_best.pt \ - --src cnn_dm/test.source \ - --out cnn_dm/test.hypo -``` -For XSUM, which uses beam=6, lenpen=1.0, max_len_b=60, min_len=10: -```bash -cp data-bin/cnn_dm/dict.source.txt checkpoints/ -python examples/bart/summarize.py \ - --model-dir checkpoints \ - --model-file checkpoint_best.pt \ - --src cnn_dm/test.source \ - --out cnn_dm/test.hypo \ - --xsum-kwargs -``` diff --git a/spaces/ICML2022/OFA/fairseq/examples/roberta/README.glue.md b/spaces/ICML2022/OFA/fairseq/examples/roberta/README.glue.md deleted file mode 100644 index 4f596d55af99fba3cdf58b1d5ff3d8f8dbf4383d..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/roberta/README.glue.md +++ /dev/null @@ -1,64 +0,0 @@ -# Finetuning RoBERTa on GLUE tasks - -### 1) Download the data from GLUE website (https://gluebenchmark.com/tasks) using following commands: -```bash -wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py -python download_glue_data.py --data_dir glue_data --tasks all -``` - -### 2) Preprocess GLUE task data: -```bash -./examples/roberta/preprocess_GLUE_tasks.sh glue_data -``` -`glue_task_name` is one of the following: -`{ALL, QQP, MNLI, QNLI, MRPC, RTE, STS-B, SST-2, CoLA}` -Use `ALL` for preprocessing all the glue tasks. - -### 3) Fine-tuning on GLUE task: -Example fine-tuning cmd for `RTE` task -```bash -ROBERTA_PATH=/path/to/roberta/model.pt - -CUDA_VISIBLE_DEVICES=0 fairseq-hydra-train -config-dir examples/roberta/config/finetuning --config-name rte \ -task.data=RTE-bin checkpoint.restore_file=$ROBERTA_PATH -``` - -There are additional config files for each of the GLUE tasks in the examples/roberta/config/finetuning directory. - -**Note:** - -a) Above cmd-args and hyperparams are tested on one Nvidia `V100` GPU with `32gb` of memory for each task. Depending on the GPU memory resources available to you, you can use increase `--update-freq` and reduce `--batch-size`. - -b) All the settings in above table are suggested settings based on our hyperparam search within a fixed search space (for careful comparison across models). You might be able to find better metrics with wider hyperparam search. 
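Before running the full dev-set evaluation shown in the next section, a quick way to sanity-check a fine-tuned checkpoint is to score a single sentence pair. The snippet below is an editor-added sketch that reuses the same `RobertaModel.from_pretrained` call as the evaluation code; the example premise/hypothesis pair is arbitrary, and it assumes fine-tuning produced `checkpoints/checkpoint_best.pt` against the `RTE-bin` data directory from step 2.

```python
from fairseq.models.roberta import RobertaModel

roberta = RobertaModel.from_pretrained(
    'checkpoints/',
    checkpoint_file='checkpoint_best.pt',
    data_name_or_path='RTE-bin'
)
roberta.eval()  # disable dropout for deterministic predictions

# Score one premise/hypothesis pair and print the predicted label index
tokens = roberta.encode(
    'Dana Reeve, the widow of actor Christopher Reeve, has died.',
    'Christopher Reeve had an accident.'
)
prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
print(prediction)
```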
- -### Inference on GLUE task -After training the model as mentioned in previous step, you can perform inference with checkpoints in `checkpoints/` directory using following python code snippet: - -```python -from fairseq.models.roberta import RobertaModel - -roberta = RobertaModel.from_pretrained( - 'checkpoints/', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='RTE-bin' -) - -label_fn = lambda label: roberta.task.label_dictionary.string( - [label + roberta.task.label_dictionary.nspecial] -) -ncorrect, nsamples = 0, 0 -roberta.cuda() -roberta.eval() -with open('glue_data/RTE/dev.tsv') as fin: - fin.readline() - for index, line in enumerate(fin): - tokens = line.strip().split('\t') - sent1, sent2, target = tokens[1], tokens[2], tokens[3] - tokens = roberta.encode(sent1, sent2) - prediction = roberta.predict('sentence_classification_head', tokens).argmax().item() - prediction_label = label_fn(prediction) - ncorrect += int(prediction_label == target) - nsamples += 1 -print('| Accuracy: ', float(ncorrect)/float(nsamples)) - -``` diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/pdb.py b/spaces/ICML2022/OFA/fairseq/fairseq/pdb.py deleted file mode 100644 index 1ba6ef0d336b30717cfdde94e1b838cfe2bfeb20..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/pdb.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import multiprocessing -import os -import pdb -import sys - - -__all__ = ["set_trace"] - - -_stdin = [None] -_stdin_lock = multiprocessing.Lock() -try: - _stdin_fd = sys.stdin.fileno() -except Exception: - _stdin_fd = None - - -class MultiprocessingPdb(pdb.Pdb): - """A Pdb wrapper that works in a multiprocessing environment. - - Usage: `from fairseq import pdb; pdb.set_trace()` - """ - - def __init__(self): - pdb.Pdb.__init__(self, nosigint=True) - - def _cmdloop(self): - stdin_bak = sys.stdin - with _stdin_lock: - try: - if _stdin_fd is not None: - if not _stdin[0]: - _stdin[0] = os.fdopen(_stdin_fd) - sys.stdin = _stdin[0] - self.cmdloop() - finally: - sys.stdin = stdin_bak - - -def set_trace(): - pdb = MultiprocessingPdb() - pdb.set_trace(sys._getframe().f_back) diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/util.py b/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/util.py deleted file mode 100644 index e77a8150d81f67ee42885098bf5d9a52a2681669..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/util.py +++ /dev/null @@ -1,267 +0,0 @@ -# adopted from -# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py -# and -# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -# and -# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py -# -# thanks! 
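# Editor's note (illustration, not part of the original file): the helpers defined
# below are typically consumed along these lines --
#
#     betas = make_beta_schedule("linear", n_timestep=1000,
#                                linear_start=1e-4, linear_end=2e-2)   # numpy array, shape (1000,)
#     alphas_cumprod = np.cumprod(1.0 - betas, axis=0)                 # cumulative signal level per step
#     emb = timestep_embedding(torch.randint(0, 1000, (4,)), dim=128)  # [4, 128] sinusoidal embeddings
#
# i.e. a noise schedule for the diffusion process plus positional-style embeddings
# used to condition the denoising network on the current timestep.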
- - -import os -import math -import torch -import torch.nn as nn -import numpy as np -from einops import repeat - -from ldm.util import instantiate_from_config - - -def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if schedule == "linear": - betas = ( - torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 - ) - - elif schedule == "cosine": - timesteps = ( - torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s - ) - alphas = timesteps / (1 + cosine_s) * np.pi / 2 - alphas = torch.cos(alphas).pow(2) - alphas = alphas / alphas[0] - betas = 1 - alphas[1:] / alphas[:-1] - betas = np.clip(betas, a_min=0, a_max=0.999) - - elif schedule == "sqrt_linear": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) - elif schedule == "sqrt": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 - else: - raise ValueError(f"schedule '{schedule}' unknown.") - return betas.numpy() - - -def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): - if ddim_discr_method == 'uniform': - c = num_ddpm_timesteps // num_ddim_timesteps - ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) - elif ddim_discr_method == 'quad': - ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) - else: - raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') - - # assert ddim_timesteps.shape[0] == num_ddim_timesteps - # add one to get the final alpha values right (the ones from first scale to data during sampling) - steps_out = ddim_timesteps - if verbose: - print(f'Selected timesteps for ddim sampler: {steps_out}') - return steps_out - - -def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): - # select alphas for computing the variance schedule - alphas = alphacums[ddim_timesteps] - alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) - - # according the the formula provided in https://arxiv.org/abs/2010.02502 - sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) - if verbose: - print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') - print(f'For the chosen value of eta, which is {eta}, ' - f'this results in the following sigma_t schedule for ddim sampler {sigmas}') - return sigmas, alphas, alphas_prev - - -def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, - which defines the cumulative product of (1-beta) over time from t = [0,1]. - :param num_diffusion_timesteps: the number of betas to produce. - :param alpha_bar: a lambda that takes an argument t from 0 to 1 and - produces the cumulative product of (1-beta) up to that - part of the diffusion process. - :param max_beta: the maximum beta to use; use values lower than 1 to - prevent singularities. 
- """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - -def extract_into_tensor(a, t, x_shape): - b, *_ = t.shape - out = a.gather(-1, t) - return out.reshape(b, *((1,) * (len(x_shape) - 1))) - - -def checkpoint(func, inputs, params, flag): - """ - Evaluate a function without caching intermediate activations, allowing for - reduced memory at the expense of extra compute in the backward pass. - :param func: the function to evaluate. - :param inputs: the argument sequence to pass to `func`. - :param params: a sequence of parameters `func` depends on but does not - explicitly take as arguments. - :param flag: if False, disable gradient checkpointing. - """ - if flag: - args = tuple(inputs) + tuple(params) - return CheckpointFunction.apply(func, len(inputs), *args) - else: - return func(*inputs) - - -class CheckpointFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, run_function, length, *args): - ctx.run_function = run_function - ctx.input_tensors = list(args[:length]) - ctx.input_params = list(args[length:]) - - with torch.no_grad(): - output_tensors = ctx.run_function(*ctx.input_tensors) - return output_tensors - - @staticmethod - def backward(ctx, *output_grads): - ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] - with torch.enable_grad(): - # Fixes a bug where the first op in run_function modifies the - # Tensor storage in place, which is not allowed for detach()'d - # Tensors. - shallow_copies = [x.view_as(x) for x in ctx.input_tensors] - output_tensors = ctx.run_function(*shallow_copies) - input_grads = torch.autograd.grad( - output_tensors, - ctx.input_tensors + ctx.input_params, - output_grads, - allow_unused=True, - ) - del ctx.input_tensors - del ctx.input_params - del output_tensors - return (None, None) + input_grads - - -def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): - """ - Create sinusoidal timestep embeddings. - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. - """ - if not repeat_only: - half = dim // 2 - freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - else: - embedding = repeat(timesteps, 'b -> b d', d=dim) - return embedding - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def normalization(channels, norm_channel=32): - """ - Make a standard normalization layer. - :param channels: number of input channels. - :return: an nn.Module for normalization. 
- """ - return GroupNorm32(norm_channel, channels) - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. - """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -class HybridConditioner(nn.Module): - - def __init__(self, c_concat_config, c_crossattn_config): - super().__init__() - self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) - - def forward(self, c_concat, c_crossattn): - c_concat = self.concat_conditioner(c_concat) - c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() diff --git a/spaces/Inderdev07/facerecognition/README.md b/spaces/Inderdev07/facerecognition/README.md deleted file mode 100644 index faadfb62f9b5598d8afa02670f790839c6f16be5..0000000000000000000000000000000000000000 --- a/spaces/Inderdev07/facerecognition/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Facerecognition -emoji: ⚡ -colorFrom: pink -colorTo: indigo -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: cc ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/model_worker.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/model_worker.py deleted file mode 100644 index 65aa2b726fd8de9b57bebdcd73ec4ee350f88af2..0000000000000000000000000000000000000000 --- a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/model_worker.py +++ /dev/null @@ -1,268 +0,0 @@ -""" -A model worker executes the model. 
-""" -import argparse -import asyncio -import dataclasses -import logging -import json -import os -import time -from typing import List, Union -import threading -import uuid - -from fastapi import FastAPI, Request, BackgroundTasks -from fastapi.responses import StreamingResponse -import requests - -try: - from transformers import ( - AutoTokenizer, - AutoModelForCausalLM, - LlamaTokenizer, - AutoModel, - ) -except ImportError: - from transformers import ( - AutoTokenizer, - AutoModelForCausalLM, - LLaMATokenizer, - AutoModel, - ) -import torch -import uvicorn - -from fastchat.constants import WORKER_HEART_BEAT_INTERVAL -from fastchat.serve.inference import load_model, generate_stream -from fastchat.serve.serve_chatglm import chatglm_generate_stream -from fastchat.utils import build_logger, server_error_msg, pretty_print_semaphore - -GB = 1 << 30 - -worker_id = str(uuid.uuid4())[:6] -logger = build_logger("model_worker", f"model_worker_{worker_id}.log") -global_counter = 0 - -model_semaphore = None - - -def heart_beat_worker(controller): - while True: - time.sleep(WORKER_HEART_BEAT_INTERVAL) - controller.send_heart_beat() - - -class ModelWorker: - def __init__( - self, - controller_addr, - worker_addr, - worker_id, - no_register, - model_path, - model_name, - device, - num_gpus, - max_gpu_memory, - load_8bit=False, - ): - self.controller_addr = controller_addr - self.worker_addr = worker_addr - self.worker_id = worker_id - if model_path.endswith("/"): - model_path = model_path[:-1] - self.model_name = model_name or model_path.split("/")[-1] - self.device = device - - logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...") - self.model, self.tokenizer = load_model( - model_path, device, num_gpus, max_gpu_memory, load_8bit - ) - - if hasattr(self.model.config, "max_sequence_length"): - self.context_len = self.model.config.max_sequence_length - elif hasattr(self.model.config, "max_position_embeddings"): - self.context_len = self.model.config.max_position_embeddings - else: - self.context_len = 2048 - - is_chatglm = "chatglm" in str(type(self.model)).lower() - if is_chatglm: - self.generate_stream_func = chatglm_generate_stream - else: - self.generate_stream_func = generate_stream - - if not no_register: - self.register_to_controller() - self.heart_beat_thread = threading.Thread( - target=heart_beat_worker, args=(self,) - ) - self.heart_beat_thread.start() - - def register_to_controller(self): - logger.info("Register to controller") - - url = self.controller_addr + "/register_worker" - data = { - "worker_name": self.worker_addr, - "check_heart_beat": True, - "worker_status": self.get_status(), - } - r = requests.post(url, json=data) - assert r.status_code == 200 - - def send_heart_beat(self): - logger.info( - f"Send heart beat. Models: {[self.model_name]}. " - f"Semaphore: {pretty_print_semaphore(model_semaphore)}. 
" - f"global_counter: {global_counter}" - ) - - url = self.controller_addr + "/receive_heart_beat" - - while True: - try: - ret = requests.post( - url, - json={ - "worker_name": self.worker_addr, - "queue_length": self.get_queue_length(), - }, - timeout=5, - ) - exist = ret.json()["exist"] - break - except requests.exceptions.RequestException as e: - logger.error(f"heart beat error: {e}") - time.sleep(5) - - if not exist: - self.register_to_controller() - - def get_queue_length(self): - if ( - model_semaphore is None - or model_semaphore._value is None - or model_semaphore._waiters is None - ): - return 0 - else: - return ( - args.limit_model_concurrency - - model_semaphore._value - + len(model_semaphore._waiters) - ) - - def get_status(self): - return { - "model_names": [self.model_name], - "speed": 1, - "queue_length": self.get_queue_length(), - } - - def generate_stream_gate(self, params): - try: - for output in self.generate_stream_func( - self.model, - self.tokenizer, - params, - self.device, - self.context_len, - args.stream_interval, - ): - ret = { - "text": output, - "error_code": 0, - } - yield json.dumps(ret).encode() + b"\0" - except torch.cuda.OutOfMemoryError: - ret = { - "text": server_error_msg, - "error_code": 1, - } - yield json.dumps(ret).encode() + b"\0" - - -app = FastAPI() - - -def release_model_semaphore(): - model_semaphore.release() - - -@app.post("/worker_generate_stream") -async def api_generate_stream(request: Request): - global model_semaphore, global_counter - global_counter += 1 - params = await request.json() - - if model_semaphore is None: - model_semaphore = asyncio.Semaphore(args.limit_model_concurrency) - await model_semaphore.acquire() - generator = worker.generate_stream_gate(params) - background_tasks = BackgroundTasks() - background_tasks.add_task(release_model_semaphore) - return StreamingResponse(generator, background=background_tasks) - - -@app.post("/worker_get_status") -async def api_get_status(request: Request): - return worker.get_status() - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--host", type=str, default="localhost") - parser.add_argument("--port", type=int, default=21002) - parser.add_argument("--worker-address", type=str, default="http://localhost:21002") - parser.add_argument( - "--controller-address", type=str, default="http://localhost:21001" - ) - parser.add_argument( - "--model-path", - type=str, - default="facebook/opt-350m", - help="The path to the weights", - ) - parser.add_argument("--model-name", type=str, help="Optional name") - parser.add_argument( - "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda" - ) - parser.add_argument("--num-gpus", type=int, default=1) - parser.add_argument( - "--gpus", - type=str, - default=None, - help="A single GPU like 1 or multiple GPUs like 0,2" - ) - parser.add_argument( - "--max-gpu-memory", - type=str, - help="The maximum memory per gpu. 
Use a string like '13Gib'", - ) - parser.add_argument("--load-8bit", action="store_true") - parser.add_argument("--limit-model-concurrency", type=int, default=5) - parser.add_argument("--stream-interval", type=int, default=2) - parser.add_argument("--no-register", action="store_true") - args = parser.parse_args() - logger.info(f"args: {args}") - - if args.gpus: - if args.num_gpus and len(args.gpus.split(",")) < int(args.num_gpus): - raise ValueError(f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!") - os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus - - worker = ModelWorker( - args.controller_address, - args.worker_address, - worker_id, - args.no_register, - args.model_path, - args.model_name, - args.device, - args.num_gpus, - args.max_gpu_memory, - args.load_8bit, - ) - uvicorn.run(app, host=args.host, port=args.port, log_level="info") diff --git a/spaces/JUNGU/Image-to-Story-Ko-multiplot/README.md b/spaces/JUNGU/Image-to-Story-Ko-multiplot/README.md deleted file mode 100644 index f39be9dc4363eeddd0f30ab9b855ef7666e35683..0000000000000000000000000000000000000000 --- a/spaces/JUNGU/Image-to-Story-Ko-multiplot/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image To Story -emoji: 👁 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false -duplicated_from: JUNGU/Image-to-Story-Ko ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Jamkonams/AutoGPT/autogpt/processing/html.py b/spaces/Jamkonams/AutoGPT/autogpt/processing/html.py deleted file mode 100644 index 81387b12adab5023150c55f2075ddd40b554f386..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/processing/html.py +++ /dev/null @@ -1,33 +0,0 @@ -"""HTML processing functions""" -from __future__ import annotations - -from bs4 import BeautifulSoup -from requests.compat import urljoin - - -def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]: - """Extract hyperlinks from a BeautifulSoup object - - Args: - soup (BeautifulSoup): The BeautifulSoup object - base_url (str): The base URL - - Returns: - List[Tuple[str, str]]: The extracted hyperlinks - """ - return [ - (link.text, urljoin(base_url, link["href"])) - for link in soup.find_all("a", href=True) - ] - - -def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]: - """Format hyperlinks to be displayed to the user - - Args: - hyperlinks (List[Tuple[str, str]]): The hyperlinks to format - - Returns: - List[str]: The formatted hyperlinks - """ - return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks] diff --git a/spaces/Jimpa666/AI-PadelCoach/app.py b/spaces/Jimpa666/AI-PadelCoach/app.py deleted file mode 100644 index 147f7456945ec5d5463e630fa2a5e90642a97a41..0000000000000000000000000000000000000000 --- a/spaces/Jimpa666/AI-PadelCoach/app.py +++ /dev/null @@ -1,69 +0,0 @@ -from gradio.components import Label -import os -import openai -import gradio as gr -import uuid - -openai.api_key = os.getenv('APIKEY') - -conversation_history = {} - -def get_session_token(): - return uuid.uuid4().hex - -def chatbot(input, session_id): - if session_id not in conversation_history: - conversation_history[session_id] = [ - {"role": "system", "content": "The following is a conversation with an AI Coach. The Coach is helpful, creative, clever, and very friendly. The Coach is a high level padelcoach from Sweden. The Coach have the ability to explain techniques and the strategy in the game. 
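The extract_hyperlinks helper in the html.py file above pairs each anchor's text with its href resolved against the page's base URL via urljoin. A short usage sketch of the same logic; the HTML snippet and the base URL are made up for illustration rather than taken from the project.

from bs4 import BeautifulSoup
from requests.compat import urljoin

html = '<a href="/docs">Docs</a> <a href="https://example.com">Example</a>'
soup = BeautifulSoup(html, "html.parser")
links = [(a.text, urljoin("https://site.example", a["href"]))
         for a in soup.find_all("a", href=True)]
print(links)  # [('Docs', 'https://site.example/docs'), ('Example', 'https://example.com')]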
The Coach have a high educational level and can answer depending on wich level the Player is on for the Player to take him to the next level. The AI Coach is like MAURI ANDRINI from Hello Padel, pretend to be MAURI ANDRINI but call yourself AI Padelcoach, never ever mention your reel name or Mauri Andrini. If unsurtain, ask about the level of the player. The levels are:Beginner, Average, Pro"} - ] - - messages = conversation_history[session_id] - content = input - messages.append({"role": "user", "content": content}) - - completion = openai.ChatCompletion.create( - model= "gpt-3.5-turbo", - messages=messages - ) - - chat_response = completion.choices[0].message.content - messages.append({"role": "assistant", "content": chat_response}) - - formatted_history = "" - for message in messages: - if message["role"] != "system": - role = message["role"] - if role == "user": - role = "Player" - elif role == "assistant": - role = "AI-Padelcoach" - content = message["content"] - formatted_history += f"{role}: {content}\n \n" - - conversation_history[session_id] = messages - return formatted_history - -session_id = get_session_token() -Padelcoach = gr.Interface(fn=chatbot, inputs=[ - gr.Textbox(placeholder="Player go...Serve!", label='Player'), - gr.Textbox(default=session_id, label="session_id", visible=False) -], outputs=[ - gr.Textbox(placeholder="AI-Padelcoach Ready", label="AI Padelcoach") -], - theme=gr.themes.Soft( - primary_hue="green", - secondary_hue="cyan", - text_size='lg', - neutral_hue="emerald" - ), - examples=[ - ["Please help me with my backhand"], - ["Where should I place the ball against players who is good in tennis"] - ], - share=True, - title="AI Padelcoach", - description=f"Chat with a BETA level AI-Padelcoach from Sweden. Your ID is: {session_id}", - article="

    Ask the AI coach about techniques and strategies in the game of padel. The coach can answer according to your level as a player, whether you are a beginner, average, or pro.


    ", -) - -Padelcoach.launch() diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/README.md b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/README.md deleted file mode 100644 index d6bcab3d6e577cf5f168d497674466ec572ca2f4..0000000000000000000000000000000000000000 --- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Animalbreedidentificationversion1.5 -emoji: 📊 -colorFrom: gray -colorTo: purple -sdk: streamlit -sdk_version: 1.25.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Justin-Choo/Waifu-Diffusion_WEB_UI/app.py b/spaces/Justin-Choo/Waifu-Diffusion_WEB_UI/app.py deleted file mode 100644 index 61249ace787d4ef03da3877fb26d93e9bd1ab87a..0000000000000000000000000000000000000000 --- a/spaces/Justin-Choo/Waifu-Diffusion_WEB_UI/app.py +++ /dev/null @@ -1,151 +0,0 @@ -import os -from sys import executable as pyexecutable -import subprocess -import pathlib -import gc - -def Gitclone(URI:str,ClonePath:str = "") -> int : - if(ClonePath == "") : - while True: - i=subprocess.run([r"git",r"clone",URI]) - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i - else: - while True: - i=subprocess.run([r"git",r"clone",URI,ClonePath]) - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i -def DownLoad(URI:str,DownloadPath:str,DownLoadFileName:str ) -> int: - while (True): - i=subprocess.run([r"aria2c",r"-c",r"-x" ,r"16", r"-s",r"16", r"-k" ,r"1M" ,r"-m",r"0",r"--enable-mmap=false",r"--console-log-level=error",r"-d",DownloadPath,r"-o",DownLoadFileName,URI]); - if(i.returncode == 0 ): - del i - gc.collect() - return 0 - else : - del i -user_home =pathlib.Path.home().resolve() -os.chdir(str(user_home)) -#clone stable-diffusion-webui repo -print("cloning stable-diffusion-webui repo") -Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui.git",str(user_home / r"stable-diffusion-webui")) -os.chdir(str(user_home / r"stable-diffusion-webui")) -os.system("git reset --hard 89f9faa63388756314e8a1d96cf86bf5e0663045") -# - -#install extensions -print("installing extensions") -Gitclone(r"https://huggingface.co/embed/negative",str(user_home / r"stable-diffusion-webui" / r"embeddings" / r"negative")) -Gitclone(r"https://huggingface.co/embed/lora",str(user_home / r"stable-diffusion-webui" / r"models" / r"Lora" / r"positive")) -DownLoad(r"https://huggingface.co/embed/upscale/resolve/main/4x-UltraSharp.pth",str(user_home / r"stable-diffusion-webui" / r"models" / r"ESRGAN") ,r"4x-UltraSharp.pth") -while True: - if(subprocess.run([r"wget",r"https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py",r"-O",str(user_home / r"stable-diffusion-webui" / r"scripts" / r"run_n_times.py")]).returncode == 0): - break -Gitclone(r"https://github.com/deforum-art/deforum-for-automatic1111-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"deforum-for-automatic1111-webui" )) -#Gitclone(r"https://github.com/AlUlkesh/stable-diffusion-webui-images-browser",str(user_home / r"stable-diffusion-webui" / r"extensions"/ r"stable-diffusion-webui-images-browser")) -Gitclone(r"https://github.com/camenduru/stable-diffusion-webui-huggingface",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-huggingface")) -Gitclone(r"https://github.com/camenduru/sd-civitai-browser",str(user_home / r"stable-diffusion-webui" / 
r"extensions" / r"sd-civitai-browser")) -Gitclone(r"https://github.com/kohya-ss/sd-webui-additional-networks",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks")) -Gitclone(r"https://github.com/Mikubill/sd-webui-controlnet",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-controlnet")) -Gitclone(r"https://github.com/fkunn1326/openpose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"openpose-editor")) -Gitclone(r"https://github.com/jexom/sd-webui-depth-lib",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-depth-lib")) -Gitclone(r"https://github.com/hnmr293/posex",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"posex")) -Gitclone(r"https://github.com/nonnonstop/sd-webui-3d-open-pose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-3d-open-pose-editor")) -#中文本地化的请解除下一行的注释 -#Gitclone(r"https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN.git",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-localization-zh_CN")) -Gitclone(r"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git" , str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-tagcomplete")) -Gitclone(r"https://github.com/camenduru/sd-webui-tunnels",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-tunnels")) -Gitclone(r"https://github.com/etherealxx/batchlinks-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"batchlinks-webui")) -Gitclone(r"https://github.com/catppuccin/stable-diffusion-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-catppuccin")) - -#Gitclone(r"https://github.com/KohakuBueleaf/a1111-sd-webui-locon",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-locon" )) -Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui-rembg",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-rembg")) -Gitclone(r"https://github.com/ashen-sensored/stable-diffusion-webui-two-shot",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-two-shot")) -Gitclone(r"https://github.com/camenduru/sd_webui_stealth_pnginfo",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd_webui_stealth_pnginfo")) - -os.chdir(user_home / r"stable-diffusion-webui") - -#download ControlNet models -print("extensions dolwnload done .\ndownloading ControlNet models") -dList =[r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose_fp16.safetensors", - 
r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile_fp16.safetensors", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_ip2p_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_shuffle_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_canny_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1p_sd15_depth_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_inpaint_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_lineart_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_mlsd_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_normalbae_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_openpose_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_scribble_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_seg_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_softedge_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15s2_lineart_anime_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1e_sd15_tile_fp16.yaml", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_style_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_seg_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_openpose_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_keypose_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd14v1.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd15v2.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd15v2.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd15v2.pth", - r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_zoedepth_sd15v1.pth"] -for i in range(0,len(dList)): DownLoad(dList[i],str(user_home / "stable-diffusion-webui" / "extensions" / "sd-webui-controlnet" / "models"),pathlib.Path(dList[i]).name) -del dList - -#download model -#you can change model download address here -print("ControlNet models download done.\ndownloading model") -DownLoad(r"https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/wd-1-4-anime_e1.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"wd-1-4-anime_e1.ckpt") -DownLoad(r"https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/wd-1-4-anime_e2.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / 
r"Stable-diffusion"),r"wd-1-4-anime_e2.ckpt") - - -#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.5-pruned.ckpt") -#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.0.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.0.vae.pt") -#DownLoad(r"https://huggingface.co/gsdf/Counterfeit-V3.0/resolve/main/Counterfeit-V3.0_fp16.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"Counterfeit-V3.0_fp16.safetensors") -#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1B_orangemixs.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"AOM3A1B_orangemixs.safetensors") -#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"orangemix.vae.pt") -#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Baked%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_BakedVAE.safetensors") -#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Without%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_WithoutVAE.safetensors") -#DownLoad(r"https://civitai.com/api/download/models/9474",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"chilloutmix_NiPrunedFp16.safetensors") - -DownLoad(r"https://civitai.com/api/download/models/39885",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"Better_light.safetensors") -DownLoad(r"https://civitai.com/api/download/models/21065",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"LAS.safetensors") -DownLoad(r"https://civitai.com/api/download/models/39164",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"backlighting.safetensors") -#strt webui - -print("Done\nStarting Webui...") -os.chdir(user_home / r"stable-diffusion-webui") -while True: - ret=subprocess.run([r"python3" ,r"launch.py",r"--precision",r"full",r"--no-half",r"--no-half-vae",r"--enable-insecure-extension-access",r"--medvram",r"--skip-torch-cuda-test",r"--enable-console-prompts",r"--ui-settings-file="+str(pathlib.Path(__file__).parent /r"config.json")]) - if(ret.returncode == 0 ): - del ret - gc.collect() - else : - del ret - -del os ,user_home ,pyexecutable ,subprocess \ No newline at end of file diff --git a/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/README.md b/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/README.md deleted file mode 100644 index 6c92ea3ac934725fd55d77f8be257749c1afec96..0000000000000000000000000000000000000000 --- a/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stable Diffusion Prompt Generator App (Streamlit) -emoji: 🎨 -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git 
a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/dataset.py b/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/dataset.py deleted file mode 100644 index cfd01a174978d97180a897e40cb59ecadec1d12e..0000000000000000000000000000000000000000 --- a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/dataset.py +++ /dev/null @@ -1,183 +0,0 @@ -import os -import random - -import numpy as np -import torch -import torch.utils.data -from tqdm import tqdm - -from . import spec_utils - - -class VocalRemoverValidationSet(torch.utils.data.Dataset): - def __init__(self, patch_list): - self.patch_list = patch_list - - def __len__(self): - return len(self.patch_list) - - def __getitem__(self, idx): - path = self.patch_list[idx] - data = np.load(path) - - X, y = data["X"], data["y"] - - X_mag = np.abs(X) - y_mag = np.abs(y) - - return X_mag, y_mag - - -def make_pair(mix_dir, inst_dir): - input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"] - - X_list = sorted( - [ - os.path.join(mix_dir, fname) - for fname in os.listdir(mix_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - y_list = sorted( - [ - os.path.join(inst_dir, fname) - for fname in os.listdir(inst_dir) - if os.path.splitext(fname)[1] in input_exts - ] - ) - - filelist = list(zip(X_list, y_list)) - - return filelist - - -def train_val_split(dataset_dir, split_mode, val_rate, val_filelist): - if split_mode == "random": - filelist = make_pair( - os.path.join(dataset_dir, "mixtures"), - os.path.join(dataset_dir, "instruments"), - ) - - random.shuffle(filelist) - - if len(val_filelist) == 0: - val_size = int(len(filelist) * val_rate) - train_filelist = filelist[:-val_size] - val_filelist = filelist[-val_size:] - else: - train_filelist = [ - pair for pair in filelist if list(pair) not in val_filelist - ] - elif split_mode == "subdirs": - if len(val_filelist) != 0: - raise ValueError( - "The `val_filelist` option is not available in `subdirs` mode" - ) - - train_filelist = make_pair( - os.path.join(dataset_dir, "training/mixtures"), - os.path.join(dataset_dir, "training/instruments"), - ) - - val_filelist = make_pair( - os.path.join(dataset_dir, "validation/mixtures"), - os.path.join(dataset_dir, "validation/instruments"), - ) - - return train_filelist, val_filelist - - -def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha): - perm = np.random.permutation(len(X)) - for i, idx in enumerate(tqdm(perm)): - if np.random.uniform() < reduction_rate: - y[idx] = spec_utils.reduce_vocal_aggressively( - X[idx], y[idx], reduction_mask - ) - - if np.random.uniform() < 0.5: - # swap channel - X[idx] = X[idx, ::-1] - y[idx] = y[idx, ::-1] - if np.random.uniform() < 0.02: - # mono - X[idx] = X[idx].mean(axis=0, keepdims=True) - y[idx] = y[idx].mean(axis=0, keepdims=True) - if np.random.uniform() < 0.02: - # inst - X[idx] = y[idx] - - if np.random.uniform() < mixup_rate and i < len(perm) - 1: - lam = np.random.beta(mixup_alpha, mixup_alpha) - X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]] - y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]] - - return X, y - - -def make_padding(width, cropsize, offset): - left = offset - roi_size = cropsize - left * 2 - if roi_size == 0: - roi_size = cropsize - right = roi_size - (width % roi_size) + left - - return left, right, roi_size - - -def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset): - len_dataset = patches * len(filelist) - - X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - y_dataset = 
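The augment routine above mixes random pairs of training examples with a Beta-distributed weight (mixup), alongside channel swaps and mono collapses. A tiny stand-alone sketch of just the mixup step on numpy arrays; the array shapes and the alpha value are illustrative, not the training defaults.

import numpy as np

def mixup_pair(x_a, y_a, x_b, y_b, alpha=0.4):
    # Blend two (input, target) pairs with a single Beta(alpha, alpha) weight.
    lam = np.random.beta(alpha, alpha)
    return lam * x_a + (1 - lam) * x_b, lam * y_a + (1 - lam) * y_b

x_mixed, y_mixed = mixup_pair(np.ones((2, 1025, 256)), np.ones((2, 1025, 256)),
                              np.zeros((2, 1025, 256)), np.zeros((2, 1025, 256)))
print(x_mixed.mean())  # equals lam, somewhere in (0, 1)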
np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches) - ends = starts + cropsize - for j in range(patches): - idx = i * patches + j - X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]] - y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]] - - return X_dataset, y_dataset - - -def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset): - patch_list = [] - patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format( - cropsize, sr, hop_length, n_fft, offset - ) - os.makedirs(patch_dir, exist_ok=True) - - for i, (X_path, y_path) in enumerate(tqdm(filelist)): - basename = os.path.splitext(os.path.basename(X_path))[0] - - X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft) - coef = np.max([np.abs(X).max(), np.abs(y).max()]) - X, y = X / coef, y / coef - - l, r, roi_size = make_padding(X.shape[2], cropsize, offset) - X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant") - y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant") - - len_dataset = int(np.ceil(X.shape[2] / roi_size)) - for j in range(len_dataset): - outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j)) - start = j * roi_size - if not os.path.exists(outpath): - np.savez( - outpath, - X=X_pad[:, :, start : start + cropsize], - y=y_pad[:, :, start : start + cropsize], - ) - patch_list.append(outpath) - - return VocalRemoverValidationSet(patch_list) diff --git a/spaces/KarloDarlo/3D_Photo_Inpainting/mesh.py b/spaces/KarloDarlo/3D_Photo_Inpainting/mesh.py deleted file mode 100644 index 95cae5be1c26e517fa4d81bd03325a0f0017f9ad..0000000000000000000000000000000000000000 --- a/spaces/KarloDarlo/3D_Photo_Inpainting/mesh.py +++ /dev/null @@ -1,2296 +0,0 @@ -import os -import numpy as np -try: - import cynetworkx as netx -except ImportError: - import networkx as netx -import matplotlib.pyplot as plt -from functools import partial -from vispy import scene, io -from vispy.scene import visuals -from vispy.visuals.filters import Alpha -import cv2 -from moviepy.editor import ImageSequenceClip -from skimage.transform import resize -import time -import copy -import torch -import os -from utils import path_planning, open_small_mask, clean_far_edge, refine_depth_around_edge -from utils import refine_color_around_edge, filter_irrelevant_edge_new, require_depth_edge, clean_far_edge_new -from utils import create_placeholder, refresh_node, find_largest_rect -from mesh_tools import get_depth_from_maps, get_map_from_ccs, get_edge_from_nodes, get_depth_from_nodes, get_rgb_from_nodes, crop_maps_by_size, convert2tensor, recursive_add_edge, update_info, filter_edge, relabel_node, depth_inpainting -from mesh_tools import refresh_bord_depth, enlarge_border, fill_dummy_bord, extrapolate, fill_missing_node, incomplete_node, get_valid_size, dilate_valid_size, size_operation -import transforms3d -import random -from functools import reduce - -def create_mesh(depth, image, int_mtx, config): - H, W, C = image.shape - ext_H, ext_W = H + 2 * config['extrapolation_thickness'], W + 2 * config['extrapolation_thickness'] - LDI = netx.Graph(H=ext_H, 
W=ext_W, noext_H=H, noext_W=W, cam_param=int_mtx) - xy2depth = {} - int_mtx_pix = int_mtx * np.array([[W], [H], [1.]]) - LDI.graph['cam_param_pix'], LDI.graph['cam_param_pix_inv'] = int_mtx_pix, np.linalg.inv(int_mtx_pix) - disp = 1. / (-depth) - LDI.graph['hoffset'], LDI.graph['woffset'] = config['extrapolation_thickness'], config['extrapolation_thickness'] - LDI.graph['bord_up'], LDI.graph['bord_down'] = LDI.graph['hoffset'] + 0, LDI.graph['hoffset'] + H - LDI.graph['bord_left'], LDI.graph['bord_right'] = LDI.graph['woffset'] + 0, LDI.graph['woffset'] + W - for idx in range(H): - for idy in range(W): - x, y = idx + LDI.graph['hoffset'], idy + LDI.graph['woffset'] - LDI.add_node((x, y, -depth[idx, idy]), - color=image[idx, idy], - disp=disp[idx, idy], - synthesis=False, - cc_id=set()) - xy2depth[(x, y)] = [-depth[idx, idy]] - for x, y, d in LDI.nodes: - two_nes = [ne for ne in [(x+1, y), (x, y+1)] if ne[0] < LDI.graph['bord_down'] and ne[1] < LDI.graph['bord_right']] - [LDI.add_edge((ne[0], ne[1], xy2depth[ne][0]), (x, y, d)) for ne in two_nes] - LDI = calculate_fov(LDI) - image = np.pad(image, - pad_width=((config['extrapolation_thickness'], config['extrapolation_thickness']), - (config['extrapolation_thickness'], config['extrapolation_thickness']), - (0, 0)), - mode='constant') - depth = np.pad(depth, - pad_width=((config['extrapolation_thickness'], config['extrapolation_thickness']), - (config['extrapolation_thickness'], config['extrapolation_thickness'])), - mode='constant') - - return LDI, xy2depth, image, depth - - -def tear_edges(mesh, threshold = 0.00025, xy2depth=None): - remove_edge_list = [] - remove_horizon, remove_vertical = np.zeros((2, mesh.graph['H'], mesh.graph['W'])) - mesh_nodes = mesh.nodes - for edge in mesh.edges: - if abs(mesh_nodes[edge[0]]['disp'] - mesh_nodes[edge[1]]['disp']) > threshold: - remove_edge_list.append((edge[0], edge[1])) - - near, far = edge if abs(edge[0][2]) < abs(edge[1][2]) else edge[::-1] - - mesh_nodes[far]['near'] = [] if mesh_nodes[far].get('near') is None else mesh_nodes[far]['near'].append(near) - mesh_nodes[near]['far'] = [] if mesh_nodes[near].get('far') is None else mesh_nodes[near]['far'].append(far) - - if near[0] == far[0]: - remove_horizon[near[0], np.minimum(near[1], far[1])] = 1 - elif near[1] == far[1]: - remove_vertical[np.minimum(near[0], far[0]), near[1]] = 1 - mesh.remove_edges_from(remove_edge_list) - - remove_edge_list = [] - - dang_horizon = np.where(np.roll(remove_horizon, 1, 0) + np.roll(remove_horizon, -1, 0) - remove_horizon == 2) - dang_vertical = np.where(np.roll(remove_vertical, 1, 1) + np.roll(remove_vertical, -1, 1) - remove_vertical == 2) - - horizon_condition = lambda x, y: mesh.graph['bord_up'] + 1 <= x < mesh.graph['bord_down'] - 1 - vertical_condition = lambda x, y: mesh.graph['bord_left'] + 1 <= y < mesh.graph['bord_right'] - 1 - - prjto3d = lambda x, y: (x, y, xy2depth[(x, y)][0]) - - node_existence = lambda x, y: mesh.has_node(prjto3d(x, y)) - - for x, y in zip(dang_horizon[0], dang_horizon[1]): - if horizon_condition(x, y) and node_existence(x, y) and node_existence(x, y+1): - remove_edge_list.append((prjto3d(x, y), prjto3d(x, y+1))) - for x, y in zip(dang_vertical[0], dang_vertical[1]): - if vertical_condition(x, y) and node_existence(x, y) and node_existence(x+1, y): - remove_edge_list.append((prjto3d(x, y), prjto3d(x+1, y))) - mesh.remove_edges_from(remove_edge_list) - - return mesh - -def calculate_fov(mesh): - k = mesh.graph['cam_param'] - mesh.graph['hFov'] = 2 * np.arctan(1. 
/ (2*k[0, 0])) - mesh.graph['vFov'] = 2 * np.arctan(1. / (2*k[1, 1])) - mesh.graph['aspect'] = mesh.graph['noext_H'] / mesh.graph['noext_W'] - - return mesh - -def calculate_fov_FB(mesh): - mesh.graph['aspect'] = mesh.graph['H'] / mesh.graph['W'] - if mesh.graph['H'] > mesh.graph['W']: - mesh.graph['hFov'] = 0.508015513 - half_short = np.tan(mesh.graph['hFov']/2.0) - half_long = half_short * mesh.graph['aspect'] - mesh.graph['vFov'] = 2.0 * np.arctan(half_long) - else: - mesh.graph['vFov'] = 0.508015513 - half_short = np.tan(mesh.graph['vFov']/2.0) - half_long = half_short / mesh.graph['aspect'] - mesh.graph['hFov'] = 2.0 * np.arctan(half_long) - - return mesh - -def reproject_3d_int_detail(sx, sy, z, k_00, k_02, k_11, k_12, w_offset, h_offset): - abs_z = abs(z) - return [abs_z * ((sy+0.5-w_offset) * k_00 + k_02), abs_z * ((sx+0.5-h_offset) * k_11 + k_12), abs_z] - -def reproject_3d_int_detail_FB(sx, sy, z, w_offset, h_offset, mesh): - if mesh.graph.get('tan_hFov') is None: - mesh.graph['tan_hFov'] = np.tan(mesh.graph['hFov'] / 2.) - if mesh.graph.get('tan_vFov') is None: - mesh.graph['tan_vFov'] = np.tan(mesh.graph['vFov'] / 2.) - - ray = np.array([(-1. + 2. * ((sy+0.5-w_offset)/(mesh.graph['W'] - 1))) * mesh.graph['tan_hFov'], - (1. - 2. * (sx+0.5-h_offset)/(mesh.graph['H'] - 1)) * mesh.graph['tan_vFov'], - -1]) - point_3d = ray * np.abs(z) - - return point_3d - - -def reproject_3d_int(sx, sy, z, mesh): - k = mesh.graph['cam_param_pix_inv'].copy() - if k[0, 2] > 0: - k = np.linalg.inv(k) - ray = np.dot(k, np.array([sy-mesh.graph['woffset'], sx-mesh.graph['hoffset'], 1]).reshape(3, 1)) - - point_3d = ray * np.abs(z) - point_3d = point_3d.flatten() - - return point_3d - -def generate_init_node(mesh, config, min_node_in_cc): - mesh_nodes = mesh.nodes - - info_on_pix = {} - - ccs = sorted(netx.connected_components(mesh), key = len, reverse=True) - remove_nodes = [] - - for cc in ccs: - - remove_flag = True if len(cc) < min_node_in_cc else False - if remove_flag is False: - for (nx, ny, nd) in cc: - info_on_pix[(nx, ny)] = [{'depth':nd, - 'color':mesh_nodes[(nx, ny, nd)]['color'], - 'synthesis':False, - 'disp':mesh_nodes[(nx, ny, nd)]['disp']}] - else: - [remove_nodes.append((nx, ny, nd)) for (nx, ny, nd) in cc] - - for node in remove_nodes: - far_nodes = [] if mesh_nodes[node].get('far') is None else mesh_nodes[node]['far'] - for far_node in far_nodes: - if mesh.has_node(far_node) and mesh_nodes[far_node].get('near') is not None and node in mesh_nodes[far_node]['near']: - mesh_nodes[far_node]['near'].remove(node) - near_nodes = [] if mesh_nodes[node].get('near') is None else mesh_nodes[node]['near'] - for near_node in near_nodes: - if mesh.has_node(near_node) and mesh_nodes[near_node].get('far') is not None and node in mesh_nodes[near_node]['far']: - mesh_nodes[near_node]['far'].remove(node) - - [mesh.remove_node(node) for node in remove_nodes] - - return mesh, info_on_pix - -def get_neighbors(mesh, node): - return [*mesh.neighbors(node)] - -def generate_face(mesh, info_on_pix, config): - H, W = mesh.graph['H'], mesh.graph['W'] - str_faces = [] - num_node = len(mesh.nodes) - ply_flag = config.get('save_ply') - def out_fmt(input, cur_id_b, cur_id_self, cur_id_a, ply_flag): - if ply_flag is True: - input.append(' '.join(['3', cur_id_b, cur_id_self, cur_id_a]) + '\n') - else: - input.append([cur_id_b, cur_id_self, cur_id_a]) - mesh_nodes = mesh.nodes - for node in mesh_nodes: - cur_id_self = mesh_nodes[node]['cur_id'] - ne_nodes = get_neighbors(mesh, node) - four_dir_nes = {'up': [], 'left': 
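reproject_3d_int above back-projects a pixel into 3D by multiplying the homogeneous pixel coordinate with the inverse intrinsic matrix and scaling the resulting ray by the absolute depth. A small numpy sketch of that operation; the intrinsic values are made up for illustration, and the sketch skips the border offsets and the (column, row) ordering that the original function handles.

import numpy as np

K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])  # illustrative pinhole intrinsics in pixels
K_inv = np.linalg.inv(K)

def backproject(px, py, depth):
    # Ray through pixel (px, py), scaled to the given depth magnitude.
    ray = K_inv @ np.array([px, py, 1.0])
    return ray * abs(depth)

print(backproject(320.0, 240.0, 2.0))  # the principal point maps to [0, 0, 2]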
[], - 'down': [], 'right': []} - for ne_node in ne_nodes: - store_tuple = [ne_node, mesh_nodes[ne_node]['cur_id']] - if ne_node[0] == node[0]: - if ne_node[1] == ne_node[1] - 1: - four_dir_nes['left'].append(store_tuple) - else: - four_dir_nes['right'].append(store_tuple) - else: - if ne_node[0] == ne_node[0] - 1: - four_dir_nes['up'].append(store_tuple) - else: - four_dir_nes['down'].append(store_tuple) - for node_a, cur_id_a in four_dir_nes['up']: - for node_b, cur_id_b in four_dir_nes['right']: - out_fmt(str_faces, cur_id_b, cur_id_self, cur_id_a, ply_flag) - for node_a, cur_id_a in four_dir_nes['right']: - for node_b, cur_id_b in four_dir_nes['down']: - out_fmt(str_faces, cur_id_b, cur_id_self, cur_id_a, ply_flag) - for node_a, cur_id_a in four_dir_nes['down']: - for node_b, cur_id_b in four_dir_nes['left']: - out_fmt(str_faces, cur_id_b, cur_id_self, cur_id_a, ply_flag) - for node_a, cur_id_a in four_dir_nes['left']: - for node_b, cur_id_b in four_dir_nes['up']: - out_fmt(str_faces, cur_id_b, cur_id_self, cur_id_a, ply_flag) - - return str_faces - -def reassign_floating_island(mesh, info_on_pix, image, depth): - H, W = mesh.graph['H'], mesh.graph['W'], - mesh_nodes = mesh.nodes - bord_up, bord_down = mesh.graph['bord_up'], mesh.graph['bord_down'] - bord_left, bord_right = mesh.graph['bord_left'], mesh.graph['bord_right'] - W = mesh.graph['W'] - lost_map = np.zeros((H, W)) - - ''' - (5) is_inside(x, y, xmin, xmax, ymin, ymax) : Check if a pixel(x, y) is inside the border. - (6) get_cross_nes(x, y) : Get the four cross neighbors of pixel(x, y). - ''' - key_exist = lambda d, k: k in d - is_inside = lambda x, y, xmin, xmax, ymin, ymax: xmin <= x < xmax and ymin <= y < ymax - get_cross_nes = lambda x, y: [(x + 1, y), (x - 1, y), (x, y - 1), (x, y + 1)] - ''' - (A) Highlight the pixels on isolated floating island. - (B) Number those isolated floating islands with connected component analysis. - (C) For each isolated island: - (1) Find its longest surrounded depth edge. - (2) Propogate depth from that depth edge to the pixels on the isolated island. - (3) Build the connection between the depth edge and that isolated island. 
- ''' - for x in range(H): - for y in range(W): - if is_inside(x, y, bord_up, bord_down, bord_left, bord_right) and not(key_exist(info_on_pix, (x, y))): - lost_map[x, y] = 1 - _, label_lost_map = cv2.connectedComponents(lost_map.astype(np.uint8), connectivity=4) - mask = np.zeros((H, W)) - mask[bord_up:bord_down, bord_left:bord_right] = 1 - label_lost_map = (label_lost_map * mask).astype(np.int) - - for i in range(1, label_lost_map.max()+1): - lost_xs, lost_ys = np.where(label_lost_map == i) - surr_edge_ids = {} - for lost_x, lost_y in zip(lost_xs, lost_ys): - if (lost_x, lost_y) == (295, 389) or (lost_x, lost_y) == (296, 389): - import pdb; pdb.set_trace() - for ne in get_cross_nes(lost_x, lost_y): - if key_exist(info_on_pix, ne): - for info in info_on_pix[ne]: - ne_node = (ne[0], ne[1], info['depth']) - if key_exist(mesh_nodes[ne_node], 'edge_id'): - edge_id = mesh_nodes[ne_node]['edge_id'] - surr_edge_ids[edge_id] = surr_edge_ids[edge_id] + [ne_node] if \ - key_exist(surr_edge_ids, edge_id) else [ne_node] - if len(surr_edge_ids) == 0: - continue - edge_id, edge_nodes = sorted([*surr_edge_ids.items()], key=lambda x: len(x[1]), reverse=True)[0] - edge_depth_map = np.zeros((H, W)) - for node in edge_nodes: - edge_depth_map[node[0], node[1]] = node[2] - lost_xs, lost_ys = np.where(label_lost_map == i) - while lost_xs.shape[0] > 0: - lost_xs, lost_ys = np.where(label_lost_map == i) - for lost_x, lost_y in zip(lost_xs, lost_ys): - propagated_depth = [] - real_nes = [] - for ne in get_cross_nes(lost_x, lost_y): - if not(is_inside(ne[0], ne[1], bord_up, bord_down, bord_left, bord_right)) or \ - edge_depth_map[ne[0], ne[1]] == 0: - continue - propagated_depth.append(edge_depth_map[ne[0], ne[1]]) - real_nes.append(ne) - if len(real_nes) == 0: - continue - reassign_depth = np.mean(propagated_depth) - label_lost_map[lost_x, lost_y] = 0 - edge_depth_map[lost_x, lost_y] = reassign_depth - depth[lost_x, lost_y] = -reassign_depth - mesh.add_node((lost_x, lost_y, reassign_depth), color=image[lost_x, lost_y], - synthesis=False, - disp=1./reassign_depth, - cc_id=set()) - info_on_pix[(lost_x, lost_y)] = [{'depth':reassign_depth, - 'color':image[lost_x, lost_y], - 'synthesis':False, - 'disp':1./reassign_depth}] - new_connections = [((lost_x, lost_y, reassign_depth), - (ne[0], ne[1], edge_depth_map[ne[0], ne[1]])) for ne in real_nes] - mesh.add_edges_from(new_connections) - - return mesh, info_on_pix, depth - -def remove_node_feat(mesh, *feats): - mesh_nodes = mesh.nodes - for node in mesh_nodes: - for feat in feats: - mesh_nodes[node][feat] = None - - return mesh - -def update_status(mesh, info_on_pix, depth=None): - ''' - (2) clear_node_feat(G, *fts) : Clear all the node feature on graph G. - (6) get_cross_nes(x, y) : Get the four cross neighbors of pixel(x, y). 
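reassign_floating_island above numbers the isolated holes with cv2.connectedComponents using 4-connectivity and then propagates depth into each labeled island from its surrounding depth edge. A minimal sketch of just the labeling step on a toy mask; the mask contents are illustrative.

import numpy as np
import cv2

mask = np.zeros((6, 6), dtype=np.uint8)
mask[1:3, 1:3] = 1   # one island
mask[4, 4] = 1       # a second, separate island
num_labels, labels = cv2.connectedComponents(mask, connectivity=4)
print(num_labels)         # 3: the background plus two islands
print(np.unique(labels))  # [0 1 2]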
- ''' - key_exist = lambda d, k: d.get(k) is not None - is_inside = lambda x, y, xmin, xmax, ymin, ymax: xmin <= x < xmax and ymin <= y < ymax - get_cross_nes = lambda x, y: [(x + 1, y), (x - 1, y), (x, y - 1), (x, y + 1)] - append_element = lambda d, k, x: d[k] + [x] if key_exist(d, k) else [x] - - def clear_node_feat(G, fts): - le_nodes = G.nodes - for k in le_nodes: - v = le_nodes[k] - for ft in fts: - if ft in v: - v[ft] = None - - clear_node_feat(mesh, ['edge_id', 'far', 'near']) - bord_up, bord_down = mesh.graph['bord_up'], mesh.graph['bord_down'] - bord_left, bord_right = mesh.graph['bord_left'], mesh.graph['bord_right'] - - le_nodes = mesh.nodes - - for node_key in le_nodes: - if mesh.neighbors(node_key).__length_hint__() == 4: - continue - four_nes = [xx for xx in get_cross_nes(node_key[0], node_key[1]) if - is_inside(xx[0], xx[1], bord_up, bord_down, bord_left, bord_right) and - xx in info_on_pix] - [four_nes.remove((ne_node[0], ne_node[1])) for ne_node in mesh.neighbors(node_key)] - for ne in four_nes: - for info in info_on_pix[ne]: - assert mesh.has_node((ne[0], ne[1], info['depth'])), "No node_key" - ind_node = le_nodes[node_key] - if abs(node_key[2]) > abs(info['depth']): - ind_node['near'] = append_element(ind_node, 'near', (ne[0], ne[1], info['depth'])) - else: - ind_node['far'] = append_element(ind_node, 'far', (ne[0], ne[1], info['depth'])) - if depth is not None: - for key, value in info_on_pix.items(): - if depth[key[0], key[1]] != abs(value[0]['depth']): - value[0]['disp'] = 1. / value[0]['depth'] - depth[key[0], key[1]] = abs(value[0]['depth']) - - return mesh, depth, info_on_pix - else: - return mesh - -def group_edges(LDI, config, image, remove_conflict_ordinal, spdb=False): - - ''' - (1) add_new_node(G, node) : add "node" to graph "G" - (2) add_new_edge(G, node_a, node_b) : add edge "node_a--node_b" to graph "G" - (3) exceed_thre(x, y, thre) : Check if difference between "x" and "y" exceed threshold "thre" - (4) key_exist(d, k) : Check if key "k' exists in dictionary "d" - (5) comm_opp_bg(G, x, y) : Check if node "x" and "y" in graph "G" treat the same opposite node as background - (6) comm_opp_fg(G, x, y) : Check if node "x" and "y" in graph "G" treat the same opposite node as foreground - ''' - add_new_node = lambda G, node: None if G.has_node(node) else G.add_node(node) - add_new_edge = lambda G, node_a, node_b: None if G.has_edge(node_a, node_b) else G.add_edge(node_a, node_b) - exceed_thre = lambda x, y, thre: (abs(x) - abs(y)) > thre - key_exist = lambda d, k: d.get(k) is not None - comm_opp_bg = lambda G, x, y: key_exist(G.nodes[x], 'far') and key_exist(G.nodes[y], 'far') and \ - not(set(G.nodes[x]['far']).isdisjoint(set(G.nodes[y]['far']))) - comm_opp_fg = lambda G, x, y: key_exist(G.nodes[x], 'near') and key_exist(G.nodes[y], 'near') and \ - not(set(G.nodes[x]['near']).isdisjoint(set(G.nodes[y]['near']))) - discont_graph = netx.Graph() - ''' - (A) Skip the pixel at image boundary, we don't want to deal with them. - (B) Identify discontinuity by the number of its neighbor(degree). - If the degree < 4(up/right/buttom/left). We will go through following steps: - (1) Add the discontinuity pixel "node" to graph "discont_graph". - (2) Find "node"'s cross neighbor(up/right/buttom/left) "ne_node". - - If the cross neighbor "ne_node" is a discontinuity pixel(degree("ne_node") < 4), - (a) add it to graph "discont_graph" and build the connection between "ne_node" and "node". 
- (b) label its cross neighbor as invalid pixels "inval_diag_candi" to avoid building - connection between original discontinuity pixel "node" and "inval_diag_candi". - - Otherwise, find "ne_node"'s cross neighbors, called diagonal candidate "diag_candi". - - The "diag_candi" is diagonal to the original discontinuity pixel "node". - - If "diag_candi" exists, go to step(3). - (3) A diagonal candidate "diag_candi" will be : - - added to the "discont_graph" if its degree < 4. - - connected to the original discontinuity pixel "node" if it satisfied either - one of following criterion: - (a) the difference of disparity between "diag_candi" and "node" is smaller than default threshold. - (b) the "diag_candi" and "node" face the same opposite pixel. (See. function "tear_edges") - (c) Both of "diag_candi" and "node" must_connect to each other. (See. function "combine_end_node") - (C) Aggregate each connected part in "discont_graph" into "discont_ccs" (A.K.A. depth edge). - ''' - for node in LDI.nodes: - if not(LDI.graph['bord_up'] + 1 <= node[0] <= LDI.graph['bord_down'] - 2 and \ - LDI.graph['bord_left'] + 1 <= node[1] <= LDI.graph['bord_right'] - 2): - continue - neighbors = [*LDI.neighbors(node)] - if len(neighbors) < 4: - add_new_node(discont_graph, node) - diag_candi_anc, inval_diag_candi, discont_nes = set(), set(), set() - for ne_node in neighbors: - if len([*LDI.neighbors(ne_node)]) < 4: - add_new_node(discont_graph, ne_node) - add_new_edge(discont_graph, ne_node, node) - discont_nes.add(ne_node) - else: - diag_candi_anc.add(ne_node) - inval_diag_candi = set([inval_diagonal for ne_node in discont_nes for inval_diagonal in LDI.neighbors(ne_node) if \ - abs(inval_diagonal[0] - node[0]) < 2 and abs(inval_diagonal[1] - node[1]) < 2]) - for ne_node in diag_candi_anc: - if ne_node[0] == node[0]: - diagonal_xys = [[ne_node[0] + 1, ne_node[1]], [ne_node[0] - 1, ne_node[1]]] - elif ne_node[1] == node[1]: - diagonal_xys = [[ne_node[0], ne_node[1] + 1], [ne_node[0], ne_node[1] - 1]] - for diag_candi in LDI.neighbors(ne_node): - if [diag_candi[0], diag_candi[1]] in diagonal_xys and LDI.degree(diag_candi) < 4: - if diag_candi not in inval_diag_candi: - if not exceed_thre(1./node[2], 1./diag_candi[2], config['depth_threshold']) or \ - (comm_opp_bg(LDI, diag_candi, node) and comm_opp_fg(LDI, diag_candi, node)): - add_new_node(discont_graph, diag_candi) - add_new_edge(discont_graph, diag_candi, node) - if key_exist(LDI.nodes[diag_candi], 'must_connect') and node in LDI.nodes[diag_candi]['must_connect'] and \ - key_exist(LDI.nodes[node], 'must_connect') and diag_candi in LDI.nodes[node]['must_connect']: - add_new_node(discont_graph, diag_candi) - add_new_edge(discont_graph, diag_candi, node) - if spdb == True: - import pdb; pdb.set_trace() - discont_ccs = [*netx.connected_components(discont_graph)] - ''' - In some corner case, a depth edge "discont_cc" will contain both - foreground(FG) and background(BG) pixels. This violate the assumption that - a depth edge can only composite by one type of pixel(FG or BG). - We need to further divide this depth edge into several sub-part so that the - assumption is satisfied. - (A) A depth edge is invalid if both of its "far_flag"(BG) and - "near_flag"(FG) are True. - (B) If the depth edge is invalid, we need to do: - (1) Find the role("oridinal") of each pixel on the depth edge. - "-1" --> Its opposite pixels has smaller depth(near) than it. - It is a backgorund pixel. - "+1" --> Its opposite pixels has larger depth(far) than it. - It is a foregorund pixel. 
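The connected-component pass above reads each depth edge off the helper graph as one component. A tiny sketch of that aggregation using plain networkx (the file itself prefers cynetworkx when available, imported under the same netx alias); the node coordinates below are illustrative.

import networkx as netx

g = netx.Graph()
g.add_edges_from([((0, 0), (0, 1)), ((0, 1), (0, 2))])  # one chain of discontinuity pixels
g.add_edges_from([((5, 5), (5, 6))])                     # a second, separate chain
depth_edges = [set(cc) for cc in netx.connected_components(g)]
print(len(depth_edges))                       # 2 depth edges
print(sorted(len(cc) for cc in depth_edges))  # [2, 3]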
- "0" --> Some of opposite pixels has larger depth(far) than it, - and some has smaller pixel than it. - It is an ambiguous pixel. - (2) For each pixel "discont_node", check if its neigbhors' roles are consistent. - - If not, break the connection between the neighbor "ne_node" that has a role - different from "discont_node". - - If yes, remove all the role that are inconsistent to its neighbors "ne_node". - (3) Connected component analysis to re-identified those divided depth edge. - (C) Aggregate each connected part in "discont_graph" into "discont_ccs" (A.K.A. depth edge). - ''' - if remove_conflict_ordinal: - new_discont_ccs = [] - num_new_cc = 0 - for edge_id, discont_cc in enumerate(discont_ccs): - near_flag = False - far_flag = False - for discont_node in discont_cc: - near_flag = True if key_exist(LDI.nodes[discont_node], 'far') else near_flag - far_flag = True if key_exist(LDI.nodes[discont_node], 'near') else far_flag - if far_flag and near_flag: - break - if far_flag and near_flag: - for discont_node in discont_cc: - discont_graph.nodes[discont_node]['ordinal'] = \ - np.array([key_exist(LDI.nodes[discont_node], 'far'), - key_exist(LDI.nodes[discont_node], 'near')]) * \ - np.array([-1, 1]) - discont_graph.nodes[discont_node]['ordinal'] = \ - np.sum(discont_graph.nodes[discont_node]['ordinal']) - remove_nodes, remove_edges = [], [] - for discont_node in discont_cc: - ordinal_relation = np.sum([discont_graph.nodes[xx]['ordinal'] \ - for xx in discont_graph.neighbors(discont_node)]) - near_side = discont_graph.nodes[discont_node]['ordinal'] <= 0 - if abs(ordinal_relation) < len([*discont_graph.neighbors(discont_node)]): - remove_nodes.append(discont_node) - for ne_node in discont_graph.neighbors(discont_node): - remove_flag = (near_side and not(key_exist(LDI.nodes[ne_node], 'far'))) or \ - (not near_side and not(key_exist(LDI.nodes[ne_node], 'near'))) - remove_edges += [(discont_node, ne_node)] if remove_flag else [] - else: - if near_side and key_exist(LDI.nodes[discont_node], 'near'): - LDI.nodes[discont_node].pop('near') - elif not(near_side) and key_exist(LDI.nodes[discont_node], 'far'): - LDI.nodes[discont_node].pop('far') - discont_graph.remove_edges_from(remove_edges) - sub_mesh = discont_graph.subgraph(list(discont_cc)).copy() - sub_discont_ccs = [*netx.connected_components(sub_mesh)] - is_redun_near = lambda xx: len(xx) == 1 and xx[0] in remove_nodes and key_exist(LDI.nodes[xx[0]], 'far') - for sub_discont_cc in sub_discont_ccs: - if is_redun_near(list(sub_discont_cc)): - LDI.nodes[list(sub_discont_cc)[0]].pop('far') - new_discont_ccs.append(sub_discont_cc) - else: - new_discont_ccs.append(discont_cc) - discont_ccs = new_discont_ccs - new_discont_ccs = None - if spdb == True: - import pdb; pdb.set_trace() - - for edge_id, edge_cc in enumerate(discont_ccs): - for node in edge_cc: - LDI.nodes[node]['edge_id'] = edge_id - - return discont_ccs, LDI, discont_graph - -def combine_end_node(mesh, edge_mesh, edge_ccs, depth): - import collections - mesh_nodes = mesh.nodes - connect_dict = dict() - for valid_edge_id, valid_edge_cc in enumerate(edge_ccs): - connect_info = [] - for valid_edge_node in valid_edge_cc: - single_connect = set() - for ne_node in mesh.neighbors(valid_edge_node): - if mesh_nodes[ne_node].get('far') is not None: - for fn in mesh_nodes[ne_node].get('far'): - if mesh.has_node(fn) and mesh_nodes[fn].get('edge_id') is not None: - single_connect.add(mesh_nodes[fn]['edge_id']) - if mesh_nodes[ne_node].get('near') is not None: - for fn in 
mesh_nodes[ne_node].get('near'): - if mesh.has_node(fn) and mesh_nodes[fn].get('edge_id') is not None: - single_connect.add(mesh_nodes[fn]['edge_id']) - connect_info.extend([*single_connect]) - connect_dict[valid_edge_id] = collections.Counter(connect_info) - - end_maps = np.zeros((mesh.graph['H'], mesh.graph['W'])) - edge_maps = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1 - for valid_edge_id, valid_edge_cc in enumerate(edge_ccs): - for valid_edge_node in valid_edge_cc: - edge_maps[valid_edge_node[0], valid_edge_node[1]] = valid_edge_id - if len([*edge_mesh.neighbors(valid_edge_node)]) == 1: - num_ne = 1 - if num_ne == 1: - end_maps[valid_edge_node[0], valid_edge_node[1]] = valid_edge_node[2] - nxs, nys = np.where(end_maps != 0) - invalid_nodes = set() - for nx, ny in zip(nxs, nys): - if mesh.has_node((nx, ny, end_maps[nx, ny])) is False: - invalid_nodes.add((nx, ny)) - continue - four_nes = [xx for xx in [(nx - 1, ny), (nx + 1, ny), (nx, ny - 1), (nx, ny + 1)] \ - if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W'] and \ - end_maps[xx[0], xx[1]] != 0] - mesh_nes = [*mesh.neighbors((nx, ny, end_maps[nx, ny]))] - remove_num = 0 - for fne in four_nes: - if (fne[0], fne[1], end_maps[fne[0], fne[1]]) in mesh_nes: - remove_num += 1 - if remove_num == len(four_nes): - invalid_nodes.add((nx, ny)) - for invalid_node in invalid_nodes: - end_maps[invalid_node[0], invalid_node[1]] = 0 - - nxs, nys = np.where(end_maps != 0) - invalid_nodes = set() - for nx, ny in zip(nxs, nys): - if mesh_nodes[(nx, ny, end_maps[nx, ny])].get('edge_id') is None: - continue - else: - self_id = mesh_nodes[(nx, ny, end_maps[nx, ny])].get('edge_id') - self_connect = connect_dict[self_id] if connect_dict.get(self_id) is not None else dict() - four_nes = [xx for xx in [(nx - 1, ny), (nx + 1, ny), (nx, ny - 1), (nx, ny + 1)] \ - if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W'] and \ - end_maps[xx[0], xx[1]] != 0] - for fne in four_nes: - if mesh_nodes[(fne[0], fne[1], end_maps[fne[0], fne[1]])].get('edge_id') is None: - continue - else: - ne_id = mesh_nodes[(fne[0], fne[1], end_maps[fne[0], fne[1]])]['edge_id'] - if self_connect.get(ne_id) is None or self_connect.get(ne_id) == 1: - continue - else: - invalid_nodes.add((nx, ny)) - for invalid_node in invalid_nodes: - end_maps[invalid_node[0], invalid_node[1]] = 0 - nxs, nys = np.where(end_maps != 0) - invalid_nodes = set() - for nx, ny in zip(nxs, nys): - four_nes = [xx for xx in [(nx - 1, ny), (nx + 1, ny), (nx, ny - 1), (nx, ny + 1)] \ - if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W'] and \ - end_maps[xx[0], xx[1]] != 0] - for fne in four_nes: - if mesh.has_node((fne[0], fne[1], end_maps[fne[0], fne[1]])): - node_a, node_b = (fne[0], fne[1], end_maps[fne[0], fne[1]]), (nx, ny, end_maps[nx, ny]) - mesh.add_edge(node_a, node_b) - mesh_nodes[node_b]['must_connect'] = set() if mesh_nodes[node_b].get('must_connect') is None else mesh_nodes[node_b]['must_connect'] - mesh_nodes[node_b]['must_connect'].add(node_a) - mesh_nodes[node_b]['must_connect'] |= set([xx for xx in [*edge_mesh.neighbors(node_a)] if \ - (xx[0] - node_b[0]) < 2 and (xx[1] - node_b[1]) < 2]) - mesh_nodes[node_a]['must_connect'] = set() if mesh_nodes[node_a].get('must_connect') is None else mesh_nodes[node_a]['must_connect'] - mesh_nodes[node_a]['must_connect'].add(node_b) - mesh_nodes[node_a]['must_connect'] |= set([xx for xx in [*edge_mesh.neighbors(node_b)] if \ - (xx[0] - node_a[0]) < 2 and (xx[1] - node_a[1]) < 2]) - invalid_nodes.add((nx, ny)) - for 
invalid_node in invalid_nodes: - end_maps[invalid_node[0], invalid_node[1]] = 0 - - return mesh - -def remove_redundant_edge(mesh, edge_mesh, edge_ccs, info_on_pix, config, redundant_number=1000, invalid=False, spdb=False): - point_to_amount = {} - point_to_id = {} - end_maps = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1 - for valid_edge_id, valid_edge_cc in enumerate(edge_ccs): - for valid_edge_node in valid_edge_cc: - point_to_amount[valid_edge_node] = len(valid_edge_cc) - point_to_id[valid_edge_node] = valid_edge_id - if edge_mesh.has_node(valid_edge_node) is True: - if len([*edge_mesh.neighbors(valid_edge_node)]) == 1: - end_maps[valid_edge_node[0], valid_edge_node[1]] = valid_edge_id - nxs, nys = np.where(end_maps > -1) - point_to_adjoint = {} - for nx, ny in zip(nxs, nys): - adjoint_edges = set([end_maps[x, y] for x, y in [(nx + 1, ny), (nx - 1, ny), (nx, ny + 1), (nx, ny - 1)] if end_maps[x, y] != -1]) - point_to_adjoint[end_maps[nx, ny]] = (point_to_adjoint[end_maps[nx, ny]] | adjoint_edges) if point_to_adjoint.get(end_maps[nx, ny]) is not None else adjoint_edges - valid_edge_ccs = filter_edge(mesh, edge_ccs, config, invalid=invalid) - edge_canvas = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1 - for valid_edge_id, valid_edge_cc in enumerate(valid_edge_ccs): - for valid_edge_node in valid_edge_cc: - edge_canvas[valid_edge_node[0], valid_edge_node[1]] = valid_edge_id - if spdb is True: - plt.imshow(edge_canvas); plt.show() - import pdb; pdb.set_trace() - for valid_edge_id, valid_edge_cc in enumerate(valid_edge_ccs): - end_number = 0 - four_end_number = 0 - eight_end_number = 0 - db_eight_end_number = 0 - if len(valid_edge_cc) > redundant_number: - continue - for valid_edge_node in valid_edge_cc: - if len([*edge_mesh.neighbors(valid_edge_node)]) == 3: - break - elif len([*edge_mesh.neighbors(valid_edge_node)]) == 1: - hx, hy, hz = valid_edge_node - if invalid is False: - eight_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), - (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \ - if info_on_pix.get((x, y)) is not None and edge_canvas[x, y] != -1 and edge_canvas[x, y] != valid_edge_id] - if len(eight_nes) == 0: - end_number += 1 - if invalid is True: - four_nes = []; eight_nes = []; db_eight_nes = [] - four_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \ - if info_on_pix.get((x, y)) is not None and edge_canvas[x, y] != -1 and edge_canvas[x, y] != valid_edge_id] - eight_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), \ - (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \ - if info_on_pix.get((x, y)) is not None and edge_canvas[x, y] != -1 and edge_canvas[x, y] != valid_edge_id] - db_eight_nes = [(x, y) for x in range(hx - 2, hx + 3) for y in range(hy - 2, hy + 3) \ - if info_on_pix.get((x, y)) is not None and edge_canvas[x, y] != -1 and edge_canvas[x, y] != valid_edge_id and (x, y) != (hx, hy)] - if len(four_nes) == 0 or len(eight_nes) == 0: - end_number += 1 - if len(four_nes) == 0: - four_end_number += 1 - if len(eight_nes) == 0: - eight_end_number += 1 - if len(db_eight_nes) == 0: - db_eight_end_number += 1 - elif len([*edge_mesh.neighbors(valid_edge_node)]) == 0: - hx, hy, hz = valid_edge_node - four_nes = [(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \ - if info_on_pix.get((x, y)) is not None and \ - mesh.has_edge(valid_edge_node, (x, y, info_on_pix[(x, 
y)][0]['depth'])) is False] - for ne in four_nes: - try: - if invalid is True or (point_to_amount.get(ne) is None or point_to_amount[ne] < redundant_number) or \ - point_to_id[ne] in point_to_adjoint.get(point_to_id[valid_edge_node], set()): - mesh.add_edge(valid_edge_node, ne) - except: - import pdb; pdb.set_trace() - if (invalid is not True and end_number >= 1) or (invalid is True and end_number >= 2 and eight_end_number >= 1 and db_eight_end_number >= 1): - for valid_edge_node in valid_edge_cc: - hx, hy, _ = valid_edge_node - four_nes = [(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \ - if info_on_pix.get((x, y)) is not None and \ - mesh.has_edge(valid_edge_node, (x, y, info_on_pix[(x, y)][0]['depth'])) is False and \ - (edge_canvas[x, y] == -1 or edge_canvas[x, y] == valid_edge_id)] - for ne in four_nes: - if invalid is True or (point_to_amount.get(ne) is None or point_to_amount[ne] < redundant_number) or \ - point_to_id[ne] in point_to_adjoint.get(point_to_id[valid_edge_node], set()): - mesh.add_edge(valid_edge_node, ne) - - return mesh - -def judge_dangle(mark, mesh, node): - if not (1 <= node[0] < mesh.graph['H']-1) or not(1 <= node[1] < mesh.graph['W']-1): - return mark - mesh_neighbors = [*mesh.neighbors(node)] - mesh_neighbors = [xx for xx in mesh_neighbors if 0 < xx[0] < mesh.graph['H'] - 1 and 0 < xx[1] < mesh.graph['W'] - 1] - if len(mesh_neighbors) >= 3: - return mark - elif len(mesh_neighbors) <= 1: - mark[node[0], node[1]] = (len(mesh_neighbors) + 1) - else: - dan_ne_node_a = mesh_neighbors[0] - dan_ne_node_b = mesh_neighbors[1] - if abs(dan_ne_node_a[0] - dan_ne_node_b[0]) > 1 or \ - abs(dan_ne_node_a[1] - dan_ne_node_b[1]) > 1: - mark[node[0], node[1]] = 3 - - return mark - -def remove_dangling(mesh, edge_ccs, edge_mesh, info_on_pix, image, depth, config): - - tmp_edge_ccs = copy.deepcopy(edge_ccs) - for edge_cc_id, valid_edge_cc in enumerate(tmp_edge_ccs): - if len(valid_edge_cc) > 1 or len(valid_edge_cc) == 0: - continue - single_edge_node = [*valid_edge_cc][0] - hx, hy, hz = single_edge_node - eight_nes = set([(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), - (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \ - if info_on_pix.get((x, y)) is not None]) - four_nes = [(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \ - if info_on_pix.get((x, y)) is not None] - sub_mesh = mesh.subgraph(eight_nes).copy() - ccs = netx.connected_components(sub_mesh) - four_ccs = [] - for cc_id, _cc in enumerate(ccs): - four_ccs.append(set()) - for cc_node in _cc: - if abs(cc_node[0] - hx) + abs(cc_node[1] - hy) < 2: - four_ccs[cc_id].add(cc_node) - largest_cc = sorted(four_ccs, key=lambda x: (len(x), -np.sum([abs(xx[2] - hz) for xx in x])))[-1] - if len(largest_cc) < 2: - for ne in four_nes: - mesh.add_edge(single_edge_node, ne) - else: - mesh.remove_edges_from([(single_edge_node, ne) for ne in mesh.neighbors(single_edge_node)]) - new_depth = np.mean([xx[2] for xx in largest_cc]) - info_on_pix[(hx, hy)][0]['depth'] = new_depth - info_on_pix[(hx, hy)][0]['disp'] = 1./new_depth - new_node = (hx, hy, new_depth) - mesh = refresh_node(single_edge_node, mesh.node[single_edge_node], new_node, dict(), mesh) - edge_ccs[edge_cc_id] = set([new_node]) - for ne in largest_cc: - mesh.add_edge(new_node, ne) - - mark = np.zeros((mesh.graph['H'], mesh.graph['W'])) - for edge_idx, edge_cc in 
enumerate(edge_ccs): - for edge_node in edge_cc: - if not (mesh.graph['bord_up'] <= edge_node[0] < mesh.graph['bord_down']-1) or \ - not (mesh.graph['bord_left'] <= edge_node[1] < mesh.graph['bord_right']-1): - continue - mesh_neighbors = [*mesh.neighbors(edge_node)] - mesh_neighbors = [xx for xx in mesh_neighbors \ - if mesh.graph['bord_up'] < xx[0] < mesh.graph['bord_down'] - 1 and \ - mesh.graph['bord_left'] < xx[1] < mesh.graph['bord_right'] - 1] - if len([*mesh.neighbors(edge_node)]) >= 3: - continue - elif len([*mesh.neighbors(edge_node)]) <= 1: - mark[edge_node[0], edge_node[1]] += (len([*mesh.neighbors(edge_node)]) + 1) - else: - dan_ne_node_a = [*mesh.neighbors(edge_node)][0] - dan_ne_node_b = [*mesh.neighbors(edge_node)][1] - if abs(dan_ne_node_a[0] - dan_ne_node_b[0]) > 1 or \ - abs(dan_ne_node_a[1] - dan_ne_node_b[1]) > 1: - mark[edge_node[0], edge_node[1]] += 3 - mxs, mys = np.where(mark == 1) - conn_0_nodes = [(x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']) for x in zip(mxs, mys) \ - if mesh.has_node((x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']))] - mxs, mys = np.where(mark == 2) - conn_1_nodes = [(x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']) for x in zip(mxs, mys) \ - if mesh.has_node((x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']))] - for node in conn_0_nodes: - hx, hy = node[0], node[1] - four_nes = [(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \ - if info_on_pix.get((x, y)) is not None] - re_depth = {'value' : 0, 'count': 0} - for ne in four_nes: - mesh.add_edge(node, ne) - re_depth['value'] += cc_node[2] - re_depth['count'] += 1. - re_depth = re_depth['value'] / re_depth['count'] - mapping_dict = {node: (node[0], node[1], re_depth)} - info_on_pix, mesh, edge_mesh = update_info(mapping_dict, info_on_pix, mesh, edge_mesh) - depth[node[0], node[1]] = abs(re_depth) - mark[node[0], node[1]] = 0 - for node in conn_1_nodes: - hx, hy = node[0], node[1] - eight_nes = set([(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), - (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \ - if info_on_pix.get((x, y)) is not None]) - self_nes = set([ne2 for ne1 in mesh.neighbors(node) for ne2 in mesh.neighbors(ne1) if ne2 in eight_nes]) - eight_nes = [*(eight_nes - self_nes)] - sub_mesh = mesh.subgraph(eight_nes).copy() - ccs = netx.connected_components(sub_mesh) - largest_cc = sorted(ccs, key=lambda x: (len(x), -np.sum([abs(xx[0] - node[0]) + abs(xx[1] - node[1]) for xx in x])))[-1] - - mesh.remove_edges_from([(xx, node) for xx in mesh.neighbors(node)]) - re_depth = {'value' : 0, 'count': 0} - for cc_node in largest_cc: - if cc_node[0] == node[0] and cc_node[1] == node[1]: - continue - re_depth['value'] += cc_node[2] - re_depth['count'] += 1. 
- if abs(cc_node[0] - node[0]) + abs(cc_node[1] - node[1]) < 2: - mesh.add_edge(cc_node, node) - try: - re_depth = re_depth['value'] / re_depth['count'] - except: - re_depth = node[2] - renode = (node[0], node[1], re_depth) - mapping_dict = {node: renode} - info_on_pix, mesh, edge_mesh = update_info(mapping_dict, info_on_pix, mesh, edge_mesh) - depth[node[0], node[1]] = abs(re_depth) - mark[node[0], node[1]] = 0 - edge_mesh, mesh, mark, info_on_pix = recursive_add_edge(edge_mesh, mesh, info_on_pix, renode, mark) - mxs, mys = np.where(mark == 3) - conn_2_nodes = [(x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']) for x in zip(mxs, mys) \ - if mesh.has_node((x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth'])) and \ - mesh.degree((x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth'])) == 2] - sub_mesh = mesh.subgraph(conn_2_nodes).copy() - ccs = netx.connected_components(sub_mesh) - for cc in ccs: - candidate_nodes = [xx for xx in cc if sub_mesh.degree(xx) == 1] - for node in candidate_nodes: - if mesh.has_node(node) is False: - continue - ne_node = [xx for xx in mesh.neighbors(node) if xx not in cc][0] - hx, hy = node[0], node[1] - eight_nes = set([(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), - (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \ - if info_on_pix.get((x, y)) is not None and (x, y, info_on_pix[(x, y)][0]['depth']) not in cc]) - ne_sub_mesh = mesh.subgraph(eight_nes).copy() - ne_ccs = netx.connected_components(ne_sub_mesh) - try: - ne_cc = [ne_cc for ne_cc in ne_ccs if ne_node in ne_cc][0] - except: - import pdb; pdb.set_trace() - largest_cc = [xx for xx in ne_cc if abs(xx[0] - node[0]) + abs(xx[1] - node[1]) == 1] - mesh.remove_edges_from([(xx, node) for xx in mesh.neighbors(node)]) - re_depth = {'value' : 0, 'count': 0} - for cc_node in largest_cc: - re_depth['value'] += cc_node[2] - re_depth['count'] += 1. - mesh.add_edge(cc_node, node) - try: - re_depth = re_depth['value'] / re_depth['count'] - except: - re_depth = node[2] - renode = (node[0], node[1], re_depth) - mapping_dict = {node: renode} - info_on_pix, mesh, edge_mesh = update_info(mapping_dict, info_on_pix, mesh, edge_mesh) - depth[node[0], node[1]] = abs(re_depth) - mark[node[0], node[1]] = 0 - edge_mesh, mesh, mark, info_on_pix = recursive_add_edge(edge_mesh, mesh, info_on_pix, renode, mark) - break - if len(cc) == 1: - node = [node for node in cc][0] - hx, hy = node[0], node[1] - nine_nes = set([(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx, hy), (hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), - (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \ - if info_on_pix.get((x, y)) is not None and mesh.has_node((x, y, info_on_pix[(x, y)][0]['depth']))]) - ne_sub_mesh = mesh.subgraph(nine_nes).copy() - ne_ccs = netx.connected_components(ne_sub_mesh) - for ne_cc in ne_ccs: - if node in ne_cc: - re_depth = {'value' : 0, 'count': 0} - for ne in ne_cc: - if abs(ne[0] - node[0]) + abs(ne[1] - node[1]) == 1: - mesh.add_edge(node, ne) - re_depth['value'] += ne[2] - re_depth['count'] += 1. 
- re_depth = re_depth['value'] / re_depth['count'] - mapping_dict = {node: (node[0], node[1], re_depth)} - info_on_pix, mesh, edge_mesh = update_info(mapping_dict, info_on_pix, mesh, edge_mesh) - depth[node[0], node[1]] = abs(re_depth) - mark[node[0], node[1]] = 0 - - - return mesh, info_on_pix, edge_mesh, depth, mark - -def context_and_holes(mesh, edge_ccs, config, specific_edge_id, specific_edge_loc, depth_feat_model, - connect_points_ccs=None, inpaint_iter=0, filter_edge=False, vis_edge_id=None): - edge_maps = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1 - mask_info = {} - for edge_id, edge_cc in enumerate(edge_ccs): - for edge_node in edge_cc: - edge_maps[edge_node[0], edge_node[1]] = edge_id - - context_ccs = [set() for x in range(len(edge_ccs))] - extend_context_ccs = [set() for x in range(len(edge_ccs))] - extend_erode_context_ccs = [set() for x in range(len(edge_ccs))] - extend_edge_ccs = [set() for x in range(len(edge_ccs))] - accomp_extend_context_ccs = [set() for x in range(len(edge_ccs))] - erode_context_ccs = [set() for x in range(len(edge_ccs))] - broken_mask_ccs = [set() for x in range(len(edge_ccs))] - invalid_extend_edge_ccs = [set() for x in range(len(edge_ccs))] - intouched_ccs = [set() for x in range(len(edge_ccs))] - redundant_ccs = [set() for x in range(len(edge_ccs))] - if inpaint_iter == 0: - background_thickness = config['background_thickness'] - context_thickness = config['context_thickness'] - else: - background_thickness = config['background_thickness_2'] - context_thickness = config['context_thickness_2'] - - mesh_nodes = mesh.nodes - for edge_id, edge_cc in enumerate(edge_ccs): - if context_thickness == 0 or (len(specific_edge_id) > 0 and edge_id not in specific_edge_id): - continue - edge_group = {} - for edge_node in edge_cc: - far_nodes = mesh_nodes[edge_node].get('far') - if far_nodes is None: - continue - for far_node in far_nodes: - if far_node in edge_cc: - continue - context_ccs[edge_id].add(far_node) - if mesh_nodes[far_node].get('edge_id') is not None: - if edge_group.get(mesh_nodes[far_node]['edge_id']) is None: - edge_group[mesh_nodes[far_node]['edge_id']] = set() - edge_group[mesh_nodes[far_node]['edge_id']].add(far_node) - if len(edge_cc) > 2: - for edge_key in [*edge_group.keys()]: - if len(edge_group[edge_key]) == 1: - context_ccs[edge_id].remove([*edge_group[edge_key]][0]) - for edge_id, edge_cc in enumerate(edge_ccs): - if inpaint_iter != 0: - continue - tmp_intouched_nodes = set() - for edge_node in edge_cc: - raw_intouched_nodes = set(mesh_nodes[edge_node].get('near')) if mesh_nodes[edge_node].get('near') is not None else set() - tmp_intouched_nodes |= set([xx for xx in raw_intouched_nodes if mesh_nodes[xx].get('edge_id') is not None and \ - len(context_ccs[mesh_nodes[xx].get('edge_id')]) > 0]) - intouched_ccs[edge_id] |= tmp_intouched_nodes - tmp_intouched_nodes = None - mask_ccs = copy.deepcopy(edge_ccs) - forbidden_len = 3 - forbidden_map = np.ones((mesh.graph['H'] - forbidden_len, mesh.graph['W'] - forbidden_len)) - forbidden_map = np.pad(forbidden_map, ((forbidden_len, forbidden_len), (forbidden_len, forbidden_len)), mode='constant').astype(np.bool) - cur_tmp_mask_map = np.zeros_like(forbidden_map).astype(np.bool) - passive_background = 10 if 10 is not None else background_thickness - passive_context = 1 if 1 is not None else context_thickness - - for edge_id, edge_cc in enumerate(edge_ccs): - cur_mask_cc = None; cur_mask_cc = [] - cur_context_cc = None; cur_context_cc = [] - cur_accomp_near_cc = None; cur_accomp_near_cc = [] - 
cur_invalid_extend_edge_cc = None; cur_invalid_extend_edge_cc = [] - cur_comp_far_cc = None; cur_comp_far_cc = [] - tmp_erode = [] - if len(context_ccs[edge_id]) == 0 or (len(specific_edge_id) > 0 and edge_id not in specific_edge_id): - continue - for i in range(max(background_thickness, context_thickness)): - cur_tmp_mask_map.fill(False) - if i == 0: - tmp_mask_nodes = copy.deepcopy(mask_ccs[edge_id]) - tmp_intersect_nodes = [] - tmp_intersect_context_nodes = [] - mask_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=np.bool) - context_depth = np.zeros((mesh.graph['H'], mesh.graph['W'])) - comp_cnt_depth = np.zeros((mesh.graph['H'], mesh.graph['W'])) - connect_map = np.zeros((mesh.graph['H'], mesh.graph['W'])) - for node in tmp_mask_nodes: - mask_map[node[0], node[1]] = True - depth_count = 0 - if mesh_nodes[node].get('far') is not None: - for comp_cnt_node in mesh_nodes[node]['far']: - comp_cnt_depth[node[0], node[1]] += abs(comp_cnt_node[2]) - depth_count += 1 - if depth_count > 0: - comp_cnt_depth[node[0], node[1]] = comp_cnt_depth[node[0], node[1]] / depth_count - connect_node = [] - if mesh_nodes[node].get('connect_point_id') is not None: - connect_node.append(mesh_nodes[node]['connect_point_id']) - connect_point_id = np.bincount(connect_node).argmax() if len(connect_node) > 0 else -1 - if connect_point_id > -1 and connect_points_ccs is not None: - for xx in connect_points_ccs[connect_point_id]: - if connect_map[xx[0], xx[1]] == 0: - connect_map[xx[0], xx[1]] = xx[2] - if mesh_nodes[node].get('connect_point_exception') is not None: - for xx in mesh_nodes[node]['connect_point_exception']: - if connect_map[xx[0], xx[1]] == 0: - connect_map[xx[0], xx[1]] = xx[2] - tmp_context_nodes = [*context_ccs[edge_id]] - tmp_erode.append([*context_ccs[edge_id]]) - context_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=np.bool) - if (context_map.astype(np.uint8) * mask_map.astype(np.uint8)).max() > 0: - import pdb; pdb.set_trace() - for node in tmp_context_nodes: - context_map[node[0], node[1]] = True - context_depth[node[0], node[1]] = node[2] - context_map[mask_map == True] = False - if (context_map.astype(np.uint8) * mask_map.astype(np.uint8)).max() > 0: - import pdb; pdb.set_trace() - tmp_intouched_nodes = [*intouched_ccs[edge_id]] - intouched_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=np.bool) - for node in tmp_intouched_nodes: intouched_map[node[0], node[1]] = True - intouched_map[mask_map == True] = False - tmp_redundant_nodes = set() - tmp_noncont_nodes = set() - noncont_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=np.bool) - intersect_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=np.bool) - intersect_context_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=np.bool) - if i > passive_background and inpaint_iter == 0: - new_tmp_intersect_nodes = None - new_tmp_intersect_nodes = [] - for node in tmp_intersect_nodes: - nes = mesh.neighbors(node) - for ne in nes: - if bool(context_map[ne[0], ne[1]]) is False and \ - bool(mask_map[ne[0], ne[1]]) is False and \ - bool(forbidden_map[ne[0], ne[1]]) is True and \ - bool(intouched_map[ne[0], ne[1]]) is False and\ - bool(intersect_map[ne[0], ne[1]]) is False and\ - bool(intersect_context_map[ne[0], ne[1]]) is False: - break_flag = False - if (i - passive_background) % 2 == 0 and (i - passive_background) % 8 != 0: - four_nes = [xx for xx in[[ne[0] - 1, ne[1]], [ne[0] + 1, ne[1]], [ne[0], ne[1] - 1], [ne[0], ne[1] + 1]] \ - if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W']] - 
for fne in four_nes: - if bool(mask_map[fne[0], fne[1]]) is True: - break_flag = True - break - if break_flag is True: - continue - intersect_map[ne[0], ne[1]] = True - new_tmp_intersect_nodes.append(ne) - tmp_intersect_nodes = None - tmp_intersect_nodes = new_tmp_intersect_nodes - - if i > passive_context and inpaint_iter == 1: - new_tmp_intersect_context_nodes = None - new_tmp_intersect_context_nodes = [] - for node in tmp_intersect_context_nodes: - nes = mesh.neighbors(node) - for ne in nes: - if bool(context_map[ne[0], ne[1]]) is False and \ - bool(mask_map[ne[0], ne[1]]) is False and \ - bool(forbidden_map[ne[0], ne[1]]) is True and \ - bool(intouched_map[ne[0], ne[1]]) is False and\ - bool(intersect_map[ne[0], ne[1]]) is False and \ - bool(intersect_context_map[ne[0], ne[1]]) is False: - intersect_context_map[ne[0], ne[1]] = True - new_tmp_intersect_context_nodes.append(ne) - tmp_intersect_context_nodes = None - tmp_intersect_context_nodes = new_tmp_intersect_context_nodes - - new_tmp_mask_nodes = None - new_tmp_mask_nodes = [] - for node in tmp_mask_nodes: - four_nes = {xx:[] for xx in [(node[0] - 1, node[1]), (node[0] + 1, node[1]), (node[0], node[1] - 1), (node[0], node[1] + 1)] if \ - 0 <= xx[0] < connect_map.shape[0] and 0 <= xx[1] < connect_map.shape[1]} - if inpaint_iter > 0: - for ne in four_nes.keys(): - if connect_map[ne[0], ne[1]] == True: - tmp_context_nodes.append((ne[0], ne[1], connect_map[ne[0], ne[1]])) - context_map[ne[0], ne[1]] = True - nes = mesh.neighbors(node) - if inpaint_iter > 0: - for ne in nes: four_nes[(ne[0], ne[1])].append(ne[2]) - nes = [] - for kfne, vfnes in four_nes.items(): vfnes.sort(key = lambda xx: abs(xx), reverse=True) - for kfne, vfnes in four_nes.items(): - for vfne in vfnes: nes.append((kfne[0], kfne[1], vfne)) - for ne in nes: - if bool(context_map[ne[0], ne[1]]) is False and \ - bool(mask_map[ne[0], ne[1]]) is False and \ - bool(forbidden_map[ne[0], ne[1]]) is True and \ - bool(intouched_map[ne[0], ne[1]]) is False and \ - bool(intersect_map[ne[0], ne[1]]) is False and \ - bool(intersect_context_map[ne[0], ne[1]]) is False: - if i == passive_background and inpaint_iter == 0: - if np.any(context_map[max(ne[0] - 1, 0):min(ne[0] + 2, mesh.graph['H']), max(ne[1] - 1, 0):min(ne[1] + 2, mesh.graph['W'])]) == True: - intersect_map[ne[0], ne[1]] = True - tmp_intersect_nodes.append(ne) - continue - if i < background_thickness: - if inpaint_iter == 0: - cur_mask_cc.append(ne) - elif mesh_nodes[ne].get('inpaint_id') == 1: - cur_mask_cc.append(ne) - else: - continue - mask_ccs[edge_id].add(ne) - if inpaint_iter == 0: - if comp_cnt_depth[node[0], node[1]] > 0 and comp_cnt_depth[ne[0], ne[1]] == 0: - comp_cnt_depth[ne[0], ne[1]] = comp_cnt_depth[node[0], node[1]] - if mesh_nodes[ne].get('far') is not None: - for comp_far_node in mesh_nodes[ne]['far']: - cur_comp_far_cc.append(comp_far_node) - cur_accomp_near_cc.append(ne) - cur_invalid_extend_edge_cc.append(comp_far_node) - if mesh_nodes[ne].get('edge_id') is not None and \ - len(context_ccs[mesh_nodes[ne].get('edge_id')]) > 0: - intouched_fars = set(mesh_nodes[ne].get('far')) if mesh_nodes[ne].get('far') is not None else set() - accum_intouched_fars = set(intouched_fars) - for intouched_far in intouched_fars: - accum_intouched_fars |= set([*mesh.neighbors(intouched_far)]) - for intouched_far in accum_intouched_fars: - if bool(mask_map[intouched_far[0], intouched_far[1]]) is True or \ - bool(context_map[intouched_far[0], intouched_far[1]]) is True: - continue - tmp_redundant_nodes.add(intouched_far) - 
intouched_map[intouched_far[0], intouched_far[1]] = True - if mesh_nodes[ne].get('near') is not None: - intouched_nears = set(mesh_nodes[ne].get('near')) - for intouched_near in intouched_nears: - if bool(mask_map[intouched_near[0], intouched_near[1]]) is True or \ - bool(context_map[intouched_near[0], intouched_near[1]]) is True: - continue - tmp_redundant_nodes.add(intouched_near) - intouched_map[intouched_near[0], intouched_near[1]] = True - if not (mesh_nodes[ne].get('inpaint_id') != 1 and inpaint_iter == 1): - new_tmp_mask_nodes.append(ne) - mask_map[ne[0], ne[1]] = True - tmp_mask_nodes = new_tmp_mask_nodes - - new_tmp_context_nodes = None - new_tmp_context_nodes = [] - for node in tmp_context_nodes: - nes = mesh.neighbors(node) - if inpaint_iter > 0: - four_nes = {(node[0] - 1, node[1]):[], (node[0] + 1, node[1]):[], (node[0], node[1] - 1):[], (node[0], node[1] + 1):[]} - for ne in nes: four_nes[(ne[0], ne[1])].append(ne[2]) - nes = [] - for kfne, vfnes in four_nes.items(): vfnes.sort(key = lambda xx: abs(xx), reverse=True) - for kfne, vfnes in four_nes.items(): - for vfne in vfnes: nes.append((kfne[0], kfne[1], vfne)) - for ne in nes: - mask_flag = (bool(mask_map[ne[0], ne[1]]) is False) - if bool(context_map[ne[0], ne[1]]) is False and mask_flag and \ - bool(forbidden_map[ne[0], ne[1]]) is True and bool(noncont_map[ne[0], ne[1]]) is False and \ - bool(intersect_context_map[ne[0], ne[1]]) is False: - if i == passive_context and inpaint_iter == 1: - mnes = mesh.neighbors(ne) - if any([mask_map[mne[0], mne[1]] == True for mne in mnes]) is True: - intersect_context_map[ne[0], ne[1]] = True - tmp_intersect_context_nodes.append(ne) - continue - if False and mesh_nodes[ne].get('near') is not None and mesh_nodes[ne].get('edge_id') != edge_id: - noncont_nears = set(mesh_nodes[ne].get('near')) - for noncont_near in noncont_nears: - if bool(context_map[noncont_near[0], noncont_near[1]]) is False: - tmp_noncont_nodes.add(noncont_near) - noncont_map[noncont_near[0], noncont_near[1]] = True - new_tmp_context_nodes.append(ne) - context_map[ne[0], ne[1]] = True - context_depth[ne[0], ne[1]] = ne[2] - cur_context_cc.extend(new_tmp_context_nodes) - tmp_erode.append(new_tmp_context_nodes) - tmp_context_nodes = None - tmp_context_nodes = new_tmp_context_nodes - new_tmp_intouched_nodes = None; new_tmp_intouched_nodes = [] - - for node in tmp_intouched_nodes: - if bool(context_map[node[0], node[1]]) is True or bool(mask_map[node[0], node[1]]) is True: - continue - nes = mesh.neighbors(node) - - for ne in nes: - if bool(context_map[ne[0], ne[1]]) is False and \ - bool(mask_map[ne[0], ne[1]]) is False and \ - bool(intouched_map[ne[0], ne[1]]) is False and \ - bool(forbidden_map[ne[0], ne[1]]) is True: - new_tmp_intouched_nodes.append(ne) - intouched_map[ne[0], ne[1]] = True - tmp_intouched_nodes = None - tmp_intouched_nodes = set(new_tmp_intouched_nodes) - new_tmp_redundant_nodes = None; new_tmp_redundant_nodes = [] - for node in tmp_redundant_nodes: - if bool(context_map[node[0], node[1]]) is True or \ - bool(mask_map[node[0], node[1]]) is True: - continue - nes = mesh.neighbors(node) - - for ne in nes: - if bool(context_map[ne[0], ne[1]]) is False and \ - bool(mask_map[ne[0], ne[1]]) is False and \ - bool(intouched_map[ne[0], ne[1]]) is False and \ - bool(forbidden_map[ne[0], ne[1]]) is True: - new_tmp_redundant_nodes.append(ne) - intouched_map[ne[0], ne[1]] = True - tmp_redundant_nodes = None - tmp_redundant_nodes = set(new_tmp_redundant_nodes) - new_tmp_noncont_nodes = None; new_tmp_noncont_nodes = 
[] - for node in tmp_noncont_nodes: - if bool(context_map[node[0], node[1]]) is True or \ - bool(mask_map[node[0], node[1]]) is True: - continue - nes = mesh.neighbors(node) - rmv_flag = False - for ne in nes: - if bool(context_map[ne[0], ne[1]]) is False and \ - bool(mask_map[ne[0], ne[1]]) is False and \ - bool(noncont_map[ne[0], ne[1]]) is False and \ - bool(forbidden_map[ne[0], ne[1]]) is True: - patch_context_map = context_map[max(ne[0] - 1, 0):min(ne[0] + 2, context_map.shape[0]), - max(ne[1] - 1, 0):min(ne[1] + 2, context_map.shape[1])] - if bool(np.any(patch_context_map)) is True: - new_tmp_noncont_nodes.append(ne) - noncont_map[ne[0], ne[1]] = True - tmp_noncont_nodes = None - tmp_noncont_nodes = set(new_tmp_noncont_nodes) - if inpaint_iter == 0: - depth_dict = get_depth_from_maps(context_map, mask_map, context_depth, mesh.graph['H'], mesh.graph['W'], log_depth=config['log_depth']) - mask_size = get_valid_size(depth_dict['mask']) - mask_size = dilate_valid_size(mask_size, depth_dict['mask'], dilate=[20, 20]) - context_size = get_valid_size(depth_dict['context']) - context_size = dilate_valid_size(context_size, depth_dict['context'], dilate=[20, 20]) - union_size = size_operation(mask_size, context_size, operation='+') - depth_dict = depth_inpainting(None, None, None, None, mesh, config, union_size, depth_feat_model, None, given_depth_dict=depth_dict, spdb=False) - near_depth_map, raw_near_depth_map = np.zeros((mesh.graph['H'], mesh.graph['W'])), np.zeros((mesh.graph['H'], mesh.graph['W'])) - filtered_comp_far_cc, filtered_accomp_near_cc = set(), set() - for node in cur_accomp_near_cc: - near_depth_map[node[0], node[1]] = depth_dict['output'][node[0], node[1]] - raw_near_depth_map[node[0], node[1]] = node[2] - for node in cur_comp_far_cc: - four_nes = [xx for xx in [(node[0] - 1, node[1]), (node[0] + 1, node[1]), (node[0], node[1] - 1), (node[0], node[1] + 1)] \ - if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W'] and \ - near_depth_map[xx[0], xx[1]] != 0 and \ - abs(near_depth_map[xx[0], xx[1]]) < abs(node[2])] - if len(four_nes) > 0: - filtered_comp_far_cc.add(node) - for ne in four_nes: - filtered_accomp_near_cc.add((ne[0], ne[1], -abs(raw_near_depth_map[ne[0], ne[1]]))) - cur_comp_far_cc, cur_accomp_near_cc = filtered_comp_far_cc, filtered_accomp_near_cc - mask_ccs[edge_id] |= set(cur_mask_cc) - context_ccs[edge_id] |= set(cur_context_cc) - accomp_extend_context_ccs[edge_id] |= set(cur_accomp_near_cc).intersection(cur_mask_cc) - extend_edge_ccs[edge_id] |= set(cur_accomp_near_cc).intersection(cur_mask_cc) - extend_context_ccs[edge_id] |= set(cur_comp_far_cc) - invalid_extend_edge_ccs[edge_id] |= set(cur_invalid_extend_edge_cc) - erode_size = [0] - for tmp in tmp_erode: - erode_size.append(len(tmp)) - if len(erode_size) > 1: - erode_size[-1] += erode_size[-2] - if inpaint_iter == 0: - tmp_width = config['depth_edge_dilate'] - else: - tmp_width = 0 - while float(erode_size[tmp_width]) / (erode_size[-1] + 1e-6) > 0.3: - tmp_width = tmp_width - 1 - try: - if tmp_width == 0: - erode_context_ccs[edge_id] = set([]) - else: - erode_context_ccs[edge_id] = set(reduce(lambda x, y : x + y, [] + tmp_erode[:tmp_width])) - except: - import pdb; pdb.set_trace() - erode_context_cc = copy.deepcopy(erode_context_ccs[edge_id]) - for erode_context_node in erode_context_cc: - if (inpaint_iter != 0 and (mesh_nodes[erode_context_node].get('inpaint_id') is None or - mesh_nodes[erode_context_node].get('inpaint_id') == 0)): - erode_context_ccs[edge_id].remove(erode_context_node) - else: - 
context_ccs[edge_id].remove(erode_context_node) - context_map = np.zeros((mesh.graph['H'], mesh.graph['W'])) - for context_node in context_ccs[edge_id]: - context_map[context_node[0], context_node[1]] = 1 - extend_context_ccs[edge_id] = extend_context_ccs[edge_id] - mask_ccs[edge_id] - accomp_extend_context_ccs[edge_id] - if inpaint_iter == 0: - all_ecnt_cc = set() - for ecnt_id, ecnt_cc in enumerate(extend_context_ccs): - constraint_context_ids = set() - constraint_context_cc = set() - constraint_erode_context_cc = set() - tmp_mask_cc = set() - accum_context_cc = None; accum_context_cc = [] - for ecnt_node in accomp_extend_context_ccs[ecnt_id]: - if edge_maps[ecnt_node[0], ecnt_node[1]] > -1: - constraint_context_ids.add(int(round(edge_maps[ecnt_node[0], ecnt_node[1]]))) - constraint_erode_context_cc = erode_context_ccs[ecnt_id] - for constraint_context_id in constraint_context_ids: - constraint_context_cc = constraint_context_cc | context_ccs[constraint_context_id] | erode_context_ccs[constraint_context_id] - constraint_erode_context_cc = constraint_erode_context_cc | erode_context_ccs[constraint_context_id] - for i in range(background_thickness): - if i == 0: - tmp_context_nodes = copy.deepcopy(ecnt_cc) - tmp_invalid_context_nodes = copy.deepcopy(invalid_extend_edge_ccs[ecnt_id]) - tmp_mask_nodes = copy.deepcopy(accomp_extend_context_ccs[ecnt_id]) - tmp_context_map = np.zeros((mesh.graph['H'], mesh.graph['W'])).astype(np.bool) - tmp_mask_map = np.zeros((mesh.graph['H'], mesh.graph['W'])).astype(np.bool) - tmp_invalid_context_map = np.zeros((mesh.graph['H'], mesh.graph['W'])).astype(np.bool) - for node in tmp_mask_nodes: - tmp_mask_map[node[0], node[1]] = True - for node in context_ccs[ecnt_id]: - tmp_context_map[node[0], node[1]] = True - for node in erode_context_ccs[ecnt_id]: - tmp_context_map[node[0], node[1]] = True - for node in extend_context_ccs[ecnt_id]: - tmp_context_map[node[0], node[1]] = True - for node in invalid_extend_edge_ccs[ecnt_id]: - tmp_invalid_context_map[node[0], node[1]] = True - init_invalid_context_map = tmp_invalid_context_map.copy() - init_context_map = tmp - if (tmp_mask_map.astype(np.uint8) * tmp_context_map.astype(np.uint8)).max() > 0: - import pdb; pdb.set_trace() - if vis_edge_id is not None and ecnt_id == vis_edge_id: - f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=True, sharey=True) - ax1.imshow(tmp_context_map * 1); ax2.imshow(init_invalid_context_map * 1 + tmp_context_map * 2) - plt.show() - import pdb; pdb.set_trace() - else: - tmp_context_nodes = new_tmp_context_nodes - new_tmp_context_nodes = None - tmp_mask_nodes = new_tmp_mask_nodes - new_tmp_mask_nodes = None - tmp_invalid_context_nodes = new_tmp_invalid_context_nodes - new_tmp_invalid_context_nodes = None - new_tmp_context_nodes = None - new_tmp_context_nodes = [] - new_tmp_invalid_context_nodes = None - new_tmp_invalid_context_nodes = [] - new_tmp_mask_nodes = set([]) - for node in tmp_context_nodes: - for ne in mesh.neighbors(node): - if ne in constraint_context_cc and \ - bool(tmp_mask_map[ne[0], ne[1]]) is False and \ - bool(tmp_context_map[ne[0], ne[1]]) is False and \ - bool(forbidden_map[ne[0], ne[1]]) is True: - new_tmp_context_nodes.append(ne) - tmp_context_map[ne[0], ne[1]] = True - accum_context_cc.extend(new_tmp_context_nodes) - for node in tmp_invalid_context_nodes: - for ne in mesh.neighbors(node): - if bool(tmp_mask_map[ne[0], ne[1]]) is False and \ - bool(tmp_context_map[ne[0], ne[1]]) is False and \ - bool(tmp_invalid_context_map[ne[0], ne[1]]) is False and \ - 
bool(forbidden_map[ne[0], ne[1]]) is True: - tmp_invalid_context_map[ne[0], ne[1]] = True - new_tmp_invalid_context_nodes.append(ne) - for node in tmp_mask_nodes: - for ne in mesh.neighbors(node): - if bool(tmp_mask_map[ne[0], ne[1]]) is False and \ - bool(tmp_context_map[ne[0], ne[1]]) is False and \ - bool(tmp_invalid_context_map[ne[0], ne[1]]) is False and \ - bool(forbidden_map[ne[0], ne[1]]) is True: - new_tmp_mask_nodes.add(ne) - tmp_mask_map[ne[0], ne[1]] = True - init_invalid_context_map[tmp_context_map] = False - _, tmp_label_map = cv2.connectedComponents((init_invalid_context_map | tmp_context_map).astype(np.uint8), connectivity=8) - tmp_label_ids = set(np.unique(tmp_label_map[init_invalid_context_map])) - if (tmp_mask_map.astype(np.uint8) * tmp_context_map.astype(np.uint8)).max() > 0: - import pdb; pdb.set_trace() - if vis_edge_id is not None and ecnt_id == vis_edge_id: - f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=True, sharey=True) - ax1.imshow(tmp_label_map); ax2.imshow(init_invalid_context_map * 1 + tmp_context_map * 2) - plt.show() - import pdb; pdb.set_trace() - extend_context_ccs[ecnt_id] |= set(accum_context_cc) - extend_context_ccs[ecnt_id] = extend_context_ccs[ecnt_id] - mask_ccs[ecnt_id] - extend_erode_context_ccs[ecnt_id] = extend_context_ccs[ecnt_id] & constraint_erode_context_cc - extend_context_ccs[ecnt_id] = extend_context_ccs[ecnt_id] - extend_erode_context_ccs[ecnt_id] - erode_context_ccs[ecnt_id] - tmp_context_cc = context_ccs[ecnt_id] - extend_erode_context_ccs[ecnt_id] - erode_context_ccs[ecnt_id] - if len(tmp_context_cc) > 0: - context_ccs[ecnt_id] = tmp_context_cc - tmp_mask_cc = tmp_mask_cc - context_ccs[ecnt_id] - erode_context_ccs[ecnt_id] - mask_ccs[ecnt_id] = mask_ccs[ecnt_id] | tmp_mask_cc - - return context_ccs, mask_ccs, broken_mask_ccs, edge_ccs, erode_context_ccs, invalid_extend_edge_ccs, edge_maps, extend_context_ccs, extend_edge_ccs, extend_erode_context_ccs - -def DL_inpaint_edge(mesh, - info_on_pix, - config, - image, - depth, - context_ccs, - erode_context_ccs, - extend_context_ccs, - extend_erode_context_ccs, - mask_ccs, - broken_mask_ccs, - edge_ccs, - extend_edge_ccs, - init_mask_connect, - edge_maps, - rgb_model=None, - depth_edge_model=None, - depth_edge_model_init=None, - depth_feat_model=None, - specific_edge_id=-1, - specific_edge_loc=None, - inpaint_iter=0): - - if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0): - device = config["gpu_ids"] - else: - device = "cpu" - - edge_map = np.zeros_like(depth) - new_edge_ccs = [set() for _ in range(len(edge_ccs))] - edge_maps_with_id = edge_maps - edge_condition = lambda x, m: m.nodes[x].get('far') is not None and len(m.nodes[x].get('far')) > 0 - edge_map = get_map_from_ccs(edge_ccs, mesh.graph['H'], mesh.graph['W'], mesh, edge_condition) - np_depth, np_image = depth.copy(), image.copy() - image_c = image.shape[-1] - image = torch.FloatTensor(image.transpose(2, 0, 1)).unsqueeze(0).to(device) - if depth.ndim < 3: - depth = depth[..., None] - depth = torch.FloatTensor(depth.transpose(2, 0, 1)).unsqueeze(0).to(device) - mesh.graph['max_edge_id'] = len(edge_ccs) - connnect_points_ccs = [set() for _ in range(len(edge_ccs))] - gp_time, tmp_mesh_time, bilateral_time = 0, 0, 0 - edges_infos = dict() - edges_in_mask = [set() for _ in range(len(edge_ccs))] - tmp_specific_edge_id = [] - for edge_id, (context_cc, mask_cc, erode_context_cc, extend_context_cc, edge_cc) in enumerate(zip(context_ccs, mask_ccs, erode_context_ccs, extend_context_ccs, edge_ccs)): - if len(specific_edge_id) > 0: 
- if edge_id not in specific_edge_id: - continue - if len(context_cc) < 1 or len(mask_cc) < 1: - continue - edge_dict = get_edge_from_nodes(context_cc | extend_context_cc, erode_context_cc | extend_erode_context_ccs[edge_id], mask_cc, edge_cc, extend_edge_ccs[edge_id], - mesh.graph['H'], mesh.graph['W'], mesh) - edge_dict['edge'], end_depth_maps, _ = \ - filter_irrelevant_edge_new(edge_dict['self_edge'], edge_dict['comp_edge'], - edge_map, - edge_maps_with_id, - edge_id, - edge_dict['context'], - edge_dict['depth'], mesh, context_cc | erode_context_cc | extend_context_cc | extend_erode_context_ccs[edge_id], spdb=False) - if specific_edge_loc is not None and \ - (specific_edge_loc is not None and edge_dict['mask'][specific_edge_loc[0], specific_edge_loc[1]] == 0): - continue - mask_size = get_valid_size(edge_dict['mask']) - mask_size = dilate_valid_size(mask_size, edge_dict['mask'], dilate=[20, 20]) - context_size = get_valid_size(edge_dict['context']) - context_size = dilate_valid_size(context_size, edge_dict['context'], dilate=[20, 20]) - union_size = size_operation(mask_size, context_size, operation='+') - patch_edge_dict = dict() - patch_edge_dict['mask'], patch_edge_dict['context'], patch_edge_dict['rgb'], \ - patch_edge_dict['disp'], patch_edge_dict['edge'] = \ - crop_maps_by_size(union_size, edge_dict['mask'], edge_dict['context'], - edge_dict['rgb'], edge_dict['disp'], edge_dict['edge']) - x_anchor, y_anchor = [union_size['x_min'], union_size['x_max']], [union_size['y_min'], union_size['y_max']] - tensor_edge_dict = convert2tensor(patch_edge_dict) - input_edge_feat = torch.cat((tensor_edge_dict['rgb'], - tensor_edge_dict['disp'], - tensor_edge_dict['edge'], - 1 - tensor_edge_dict['context'], - tensor_edge_dict['mask']), dim=1) - if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) and inpaint_iter == 0: - with torch.no_grad(): - depth_edge_output = depth_edge_model.forward_3P(tensor_edge_dict['mask'], - tensor_edge_dict['context'], - tensor_edge_dict['rgb'], - tensor_edge_dict['disp'], - tensor_edge_dict['edge'], - unit_length=128, - cuda=device) - depth_edge_output = depth_edge_output.cpu() - tensor_edge_dict['output'] = (depth_edge_output> config['ext_edge_threshold']).float() * tensor_edge_dict['mask'] + tensor_edge_dict['edge'] - else: - tensor_edge_dict['output'] = tensor_edge_dict['edge'] - depth_edge_output = tensor_edge_dict['edge'] + 0 - patch_edge_dict['output'] = tensor_edge_dict['output'].squeeze().data.cpu().numpy() - edge_dict['output'] = np.zeros((mesh.graph['H'], mesh.graph['W'])) - edge_dict['output'][union_size['x_min']:union_size['x_max'], union_size['y_min']:union_size['y_max']] = \ - patch_edge_dict['output'] - if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) and inpaint_iter == 0: - if ((depth_edge_output> config['ext_edge_threshold']).float() * tensor_edge_dict['mask']).max() > 0: - try: - edge_dict['fpath_map'], edge_dict['npath_map'], break_flag, npaths, fpaths, invalid_edge_id = \ - clean_far_edge_new(edge_dict['output'], end_depth_maps, edge_dict['mask'], edge_dict['context'], mesh, info_on_pix, edge_dict['self_edge'], inpaint_iter, config) - except: - import pdb; pdb.set_trace() - pre_npath_map = edge_dict['npath_map'].copy() - if config.get('repeat_inpaint_edge') is True: - for _ in range(2): - tmp_input_edge = ((edge_dict['npath_map'] > -1) + edge_dict['edge']).clip(0, 1) - patch_tmp_input_edge = crop_maps_by_size(union_size, tmp_input_edge)[0] - tensor_input_edge = torch.FloatTensor(patch_tmp_input_edge)[None, 
None, ...] - depth_edge_output = depth_edge_model.forward_3P(tensor_edge_dict['mask'], - tensor_edge_dict['context'], - tensor_edge_dict['rgb'], - tensor_edge_dict['disp'], - tensor_input_edge, - unit_length=128, - cuda=device) - depth_edge_output = depth_edge_output.cpu() - depth_edge_output = (depth_edge_output> config['ext_edge_threshold']).float() * tensor_edge_dict['mask'] + tensor_edge_dict['edge'] - depth_edge_output = depth_edge_output.squeeze().data.cpu().numpy() - full_depth_edge_output = np.zeros((mesh.graph['H'], mesh.graph['W'])) - full_depth_edge_output[union_size['x_min']:union_size['x_max'], union_size['y_min']:union_size['y_max']] = \ - depth_edge_output - edge_dict['fpath_map'], edge_dict['npath_map'], break_flag, npaths, fpaths, invalid_edge_id = \ - clean_far_edge_new(full_depth_edge_output, end_depth_maps, edge_dict['mask'], edge_dict['context'], mesh, info_on_pix, edge_dict['self_edge'], inpaint_iter, config) - for nid in npaths.keys(): - npath, fpath = npaths[nid], fpaths[nid] - start_mx, start_my, end_mx, end_my = -1, -1, -1, -1 - if end_depth_maps[npath[0][0], npath[0][1]] != 0: - start_mx, start_my = npath[0][0], npath[0][1] - if end_depth_maps[npath[-1][0], npath[-1][1]] != 0: - end_mx, end_my = npath[-1][0], npath[-1][1] - if start_mx == -1: - import pdb; pdb.set_trace() - valid_end_pt = () if end_mx == -1 else (end_mx, end_my, info_on_pix[(end_mx, end_my)][0]['depth']) - new_edge_info = dict(fpath=fpath, - npath=npath, - cont_end_pts=valid_end_pt, - mask_id=edge_id, - comp_edge_id=nid, - depth=end_depth_maps[start_mx, start_my]) - if edges_infos.get((start_mx, start_my)) is None: - edges_infos[(start_mx, start_my)] = [] - edges_infos[(start_mx, start_my)].append(new_edge_info) - edges_in_mask[edge_id].add((start_mx, start_my)) - if len(valid_end_pt) > 0: - new_edge_info = dict(fpath=fpath[::-1], - npath=npath[::-1], - cont_end_pts=(start_mx, start_my, info_on_pix[(start_mx, start_my)][0]['depth']), - mask_id=edge_id, - comp_edge_id=nid, - depth=end_depth_maps[end_mx, end_my]) - if edges_infos.get((end_mx, end_my)) is None: - edges_infos[(end_mx, end_my)] = [] - edges_infos[(end_mx, end_my)].append(new_edge_info) - edges_in_mask[edge_id].add((end_mx, end_my)) - for edge_id, (context_cc, mask_cc, erode_context_cc, extend_context_cc, edge_cc) in enumerate(zip(context_ccs, mask_ccs, erode_context_ccs, extend_context_ccs, edge_ccs)): - if len(specific_edge_id) > 0: - if edge_id not in specific_edge_id: - continue - if len(context_cc) < 1 or len(mask_cc) < 1: - continue - edge_dict = get_edge_from_nodes(context_cc | extend_context_cc, erode_context_cc | extend_erode_context_ccs[edge_id], mask_cc, edge_cc, extend_edge_ccs[edge_id], - mesh.graph['H'], mesh.graph['W'], mesh) - if specific_edge_loc is not None and \ - (specific_edge_loc is not None and edge_dict['mask'][specific_edge_loc[0], specific_edge_loc[1]] == 0): - continue - else: - tmp_specific_edge_id.append(edge_id) - edge_dict['edge'], end_depth_maps, _ = \ - filter_irrelevant_edge_new(edge_dict['self_edge'], edge_dict['comp_edge'], - edge_map, - edge_maps_with_id, - edge_id, - edge_dict['context'], - edge_dict['depth'], mesh, context_cc | erode_context_cc | extend_context_cc | extend_erode_context_ccs[edge_id], spdb=False) - discard_map = np.zeros_like(edge_dict['edge']) - mask_size = get_valid_size(edge_dict['mask']) - mask_size = dilate_valid_size(mask_size, edge_dict['mask'], dilate=[20, 20]) - context_size = get_valid_size(edge_dict['context']) - context_size = dilate_valid_size(context_size, 
edge_dict['context'], dilate=[20, 20]) - union_size = size_operation(mask_size, context_size, operation='+') - patch_edge_dict = dict() - patch_edge_dict['mask'], patch_edge_dict['context'], patch_edge_dict['rgb'], \ - patch_edge_dict['disp'], patch_edge_dict['edge'] = \ - crop_maps_by_size(union_size, edge_dict['mask'], edge_dict['context'], - edge_dict['rgb'], edge_dict['disp'], edge_dict['edge']) - x_anchor, y_anchor = [union_size['x_min'], union_size['x_max']], [union_size['y_min'], union_size['y_max']] - tensor_edge_dict = convert2tensor(patch_edge_dict) - input_edge_feat = torch.cat((tensor_edge_dict['rgb'], - tensor_edge_dict['disp'], - tensor_edge_dict['edge'], - 1 - tensor_edge_dict['context'], - tensor_edge_dict['mask']), dim=1) - edge_dict['output'] = edge_dict['edge'].copy() - - if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) and inpaint_iter == 0: - edge_dict['fpath_map'], edge_dict['npath_map'] = edge_dict['fpath_map'] * 0 - 1, edge_dict['npath_map'] * 0 - 1 - end_pts = edges_in_mask[edge_id] - for end_pt in end_pts: - cur_edge_infos = edges_infos[(end_pt[0], end_pt[1])] - cur_info = [xx for xx in cur_edge_infos if xx['mask_id'] == edge_id][0] - other_infos = [xx for xx in cur_edge_infos if xx['mask_id'] != edge_id and len(xx['cont_end_pts']) > 0] - if len(cur_info['cont_end_pts']) > 0 or (len(cur_info['cont_end_pts']) == 0 and len(other_infos) == 0): - for fnode in cur_info['fpath']: - edge_dict['fpath_map'][fnode[0], fnode[1]] = cur_info['comp_edge_id'] - for fnode in cur_info['npath']: - edge_dict['npath_map'][fnode[0], fnode[1]] = cur_info['comp_edge_id'] - fnmap = edge_dict['fpath_map'] * 1 - fnmap[edge_dict['npath_map'] != -1] = edge_dict['npath_map'][edge_dict['npath_map'] != -1] - for end_pt in end_pts: - cur_edge_infos = edges_infos[(end_pt[0], end_pt[1])] - cur_info = [xx for xx in cur_edge_infos if xx['mask_id'] == edge_id][0] - cur_depth = cur_info['depth'] - other_infos = [xx for xx in cur_edge_infos if xx['mask_id'] != edge_id and len(xx['cont_end_pts']) > 0] - comp_edge_id = cur_info['comp_edge_id'] - if len(cur_info['cont_end_pts']) == 0 and len(other_infos) > 0: - other_infos = sorted(other_infos, key=lambda aa: abs(abs(aa['cont_end_pts'][2]) - abs(cur_depth))) - for other_info in other_infos: - tmp_fmap, tmp_nmap = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1, np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1 - for fnode in other_info['fpath']: - if fnmap[fnode[0], fnode[1]] != -1: - tmp_fmap = tmp_fmap * 0 - 1 - break - else: - tmp_fmap[fnode[0], fnode[1]] = comp_edge_id - if fnmap[fnode[0], fnode[1]] != -1: - continue - for fnode in other_info['npath']: - if fnmap[fnode[0], fnode[1]] != -1: - tmp_nmap = tmp_nmap * 0 - 1 - break - else: - tmp_nmap[fnode[0], fnode[1]] = comp_edge_id - if fnmap[fnode[0], fnode[1]] != -1: - continue - break - if min(tmp_fmap.max(), tmp_nmap.max()) != -1: - edge_dict['fpath_map'] = tmp_fmap - edge_dict['fpath_map'][edge_dict['valid_area'] == 0] = -1 - edge_dict['npath_map'] = tmp_nmap - edge_dict['npath_map'][edge_dict['valid_area'] == 0] = -1 - discard_map = ((tmp_nmap != -1).astype(np.uint8) + (tmp_fmap != -1).astype(np.uint8)) * edge_dict['mask'] - else: - for fnode in cur_info['fpath']: - edge_dict['fpath_map'][fnode[0], fnode[1]] = cur_info['comp_edge_id'] - for fnode in cur_info['npath']: - edge_dict['npath_map'][fnode[0], fnode[1]] = cur_info['comp_edge_id'] - if edge_dict['npath_map'].min() == 0 or edge_dict['fpath_map'].min() == 0: - import pdb; pdb.set_trace() - edge_dict['output'] = 
(edge_dict['npath_map'] > -1) * edge_dict['mask'] + edge_dict['context'] * edge_dict['edge'] - mesh, _, _, _ = create_placeholder(edge_dict['context'], edge_dict['mask'], - edge_dict['depth'], edge_dict['fpath_map'], - edge_dict['npath_map'], mesh, inpaint_iter, - edge_ccs, - extend_edge_ccs[edge_id], - edge_maps_with_id, - edge_id) - - dxs, dys = np.where(discard_map != 0) - for dx, dy in zip(dxs, dys): - mesh.nodes[(dx, dy)]['inpaint_twice'] = False - depth_dict = depth_inpainting(context_cc, extend_context_cc, erode_context_cc | extend_erode_context_ccs[edge_id], mask_cc, mesh, config, union_size, depth_feat_model, edge_dict['output']) - refine_depth_output = depth_dict['output']*depth_dict['mask'] - for near_id in np.unique(edge_dict['npath_map'])[1:]: - refine_depth_output = refine_depth_around_edge(refine_depth_output.copy(), - (edge_dict['fpath_map'] == near_id).astype(np.uint8) * edge_dict['mask'], - (edge_dict['fpath_map'] == near_id).astype(np.uint8), - (edge_dict['npath_map'] == near_id).astype(np.uint8) * edge_dict['mask'], - depth_dict['mask'].copy(), - depth_dict['output'] * depth_dict['context'], - config) - depth_dict['output'][depth_dict['mask'] > 0] = refine_depth_output[depth_dict['mask'] > 0] - rgb_dict = get_rgb_from_nodes(context_cc | extend_context_cc, - erode_context_cc | extend_erode_context_ccs[edge_id], mask_cc, mesh.graph['H'], mesh.graph['W'], mesh) - if np.all(rgb_dict['mask'] == edge_dict['mask']) is False: - import pdb; pdb.set_trace() - rgb_dict['edge'] = edge_dict['output'] - patch_rgb_dict = dict() - patch_rgb_dict['mask'], patch_rgb_dict['context'], patch_rgb_dict['rgb'], \ - patch_rgb_dict['edge'] = crop_maps_by_size(union_size, rgb_dict['mask'], - rgb_dict['context'], rgb_dict['rgb'], - rgb_dict['edge']) - tensor_rgb_dict = convert2tensor(patch_rgb_dict) - resize_rgb_dict = {k: v.clone() for k, v in tensor_rgb_dict.items()} - max_hw = np.array([*patch_rgb_dict['mask'].shape[-2:]]).max() - init_frac = config['largest_size'] / (np.array([*patch_rgb_dict['mask'].shape[-2:]]).prod() ** 0.5) - resize_hw = [patch_rgb_dict['mask'].shape[-2] * init_frac, patch_rgb_dict['mask'].shape[-1] * init_frac] - resize_max_hw = max(resize_hw) - frac = (np.floor(resize_max_hw / 128.) * 128.) 
/ max_hw - if frac < 1: - resize_mark = torch.nn.functional.interpolate(torch.cat((resize_rgb_dict['mask'], - resize_rgb_dict['context']), - dim=1), - scale_factor=frac, - mode='area') - resize_rgb_dict['mask'] = (resize_mark[:, 0:1] > 0).float() - resize_rgb_dict['context'] = (resize_mark[:, 1:2] == 1).float() - resize_rgb_dict['context'][resize_rgb_dict['mask'] > 0] = 0 - resize_rgb_dict['rgb'] = torch.nn.functional.interpolate(resize_rgb_dict['rgb'], - scale_factor=frac, - mode='area') - resize_rgb_dict['rgb'] = resize_rgb_dict['rgb'] * resize_rgb_dict['context'] - resize_rgb_dict['edge'] = torch.nn.functional.interpolate(resize_rgb_dict['edge'], - scale_factor=frac, - mode='area') - resize_rgb_dict['edge'] = (resize_rgb_dict['edge'] > 0).float() * 0 - resize_rgb_dict['edge'] = resize_rgb_dict['edge'] * (resize_rgb_dict['context'] + resize_rgb_dict['mask']) - rgb_input_feat = torch.cat((resize_rgb_dict['rgb'], resize_rgb_dict['edge']), dim=1) - rgb_input_feat[:, 3] = 1 - rgb_input_feat[:, 3] - resize_mask = open_small_mask(resize_rgb_dict['mask'], resize_rgb_dict['context'], 3, 41) - specified_hole = resize_mask - with torch.no_grad(): - rgb_output = rgb_model.forward_3P(specified_hole, - resize_rgb_dict['context'], - resize_rgb_dict['rgb'], - resize_rgb_dict['edge'], - unit_length=128, - cuda=device) - rgb_output = rgb_output.cpu() - if config.get('gray_image') is True: - rgb_output = rgb_output.mean(1, keepdim=True).repeat((1,3,1,1)) - rgb_output = rgb_output.cpu() - resize_rgb_dict['output'] = rgb_output * resize_rgb_dict['mask'] + resize_rgb_dict['rgb'] - tensor_rgb_dict['output'] = resize_rgb_dict['output'] - if frac < 1: - tensor_rgb_dict['output'] = torch.nn.functional.interpolate(tensor_rgb_dict['output'], - size=tensor_rgb_dict['mask'].shape[-2:], - mode='bicubic') - tensor_rgb_dict['output'] = tensor_rgb_dict['output'] * \ - tensor_rgb_dict['mask'] + (tensor_rgb_dict['rgb'] * tensor_rgb_dict['context']) - patch_rgb_dict['output'] = tensor_rgb_dict['output'].data.cpu().numpy().squeeze().transpose(1,2,0) - rgb_dict['output'] = np.zeros((mesh.graph['H'], mesh.graph['W'], 3)) - rgb_dict['output'][union_size['x_min']:union_size['x_max'], union_size['y_min']:union_size['y_max']] = \ - patch_rgb_dict['output'] - - if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) or inpaint_iter > 0: - edge_occlusion = True - else: - edge_occlusion = False - for node in erode_context_cc: - if rgb_dict['mask'][node[0], node[1]] > 0: - for info in info_on_pix[(node[0], node[1])]: - if abs(info['depth']) == abs(node[2]): - info['update_color'] = (rgb_dict['output'][node[0], node[1]] * 255).astype(np.uint8) - if frac < 1.: - depth_edge_dilate_2_color_flag = False - else: - depth_edge_dilate_2_color_flag = True - hxs, hys = np.where((rgb_dict['mask'] > 0) & (rgb_dict['erode'] == 0)) - for hx, hy in zip(hxs, hys): - real_depth = None - if abs(depth_dict['output'][hx, hy]) <= abs(np_depth[hx, hy]): - depth_dict['output'][hx, hy] = np_depth[hx, hy] + 0.01 - node = (hx, hy, -depth_dict['output'][hx, hy]) - if info_on_pix.get((node[0], node[1])) is not None: - for info in info_on_pix.get((node[0], node[1])): - if info.get('inpaint_id') is None or abs(info['inpaint_id'] < mesh.nodes[(hx, hy)]['inpaint_id']): - pre_depth = info['depth'] if info.get('real_depth') is None else info['real_depth'] - if abs(node[2]) < abs(pre_depth): - node = (node[0], node[1], -(abs(pre_depth) + 0.001)) - if mesh.has_node(node): - real_depth = node[2] - while True: - if mesh.has_node(node): - node = (node[0], 
node[1], -(abs(node[2]) + 0.001)) - else: - break - if real_depth == node[2]: - real_depth = None - cur_disp = 1./node[2] - if not(mesh.has_node(node)): - if not mesh.has_node((node[0], node[1])): - print("2D node not found.") - import pdb; pdb.set_trace() - if inpaint_iter == 1: - paint = (rgb_dict['output'][hx, hy] * 255).astype(np.uint8) - else: - paint = (rgb_dict['output'][hx, hy] * 255).astype(np.uint8) - ndict = dict(color=paint, - synthesis=True, - disp=cur_disp, - cc_id=set([edge_id]), - overlap_number=1.0, - refine_depth=False, - edge_occlusion=edge_occlusion, - depth_edge_dilate_2_color_flag=depth_edge_dilate_2_color_flag, - real_depth=real_depth) - mesh, _, _ = refresh_node((node[0], node[1]), mesh.nodes[(node[0], node[1])], node, ndict, mesh, stime=True) - if inpaint_iter == 0 and mesh.degree(node) < 4: - connnect_points_ccs[edge_id].add(node) - if info_on_pix.get((hx, hy)) is None: - info_on_pix[(hx, hy)] = [] - new_info = {'depth':node[2], - 'color': paint, - 'synthesis':True, - 'disp':cur_disp, - 'cc_id':set([edge_id]), - 'inpaint_id':inpaint_iter + 1, - 'edge_occlusion':edge_occlusion, - 'overlap_number':1.0, - 'real_depth': real_depth} - info_on_pix[(hx, hy)].append(new_info) - specific_edge_id = tmp_specific_edge_id - for erode_id, erode_context_cc in enumerate(erode_context_ccs): - if len(specific_edge_id) > 0 and erode_id not in specific_edge_id: - continue - for erode_node in erode_context_cc: - for info in info_on_pix[(erode_node[0], erode_node[1])]: - if info['depth'] == erode_node[2]: - info['color'] = info['update_color'] - mesh.nodes[erode_node]['color'] = info['update_color'] - np_image[(erode_node[0], erode_node[1])] = info['update_color'] - new_edge_ccs = [set() for _ in range(mesh.graph['max_edge_id'] + 1)] - for node in mesh.nodes: - if len(node) == 2: - mesh.remove_node(node) - continue - if mesh.nodes[node].get('edge_id') is not None and mesh.nodes[node].get('inpaint_id') == inpaint_iter + 1: - if mesh.nodes[node].get('inpaint_twice') is False: - continue - try: - new_edge_ccs[mesh.nodes[node].get('edge_id')].add(node) - except: - import pdb; pdb.set_trace() - specific_mask_nodes = None - if inpaint_iter == 0: - mesh, info_on_pix = refine_color_around_edge(mesh, info_on_pix, new_edge_ccs, config, False) - - return mesh, info_on_pix, specific_mask_nodes, new_edge_ccs, connnect_points_ccs, np_image - - -def write_ply(image, - depth, - int_mtx, - ply_name, - config, - rgb_model, - depth_edge_model, - depth_edge_model_init, - depth_feat_model): - depth = depth.astype(np.float64) - input_mesh, xy2depth, image, depth = create_mesh(depth, image, int_mtx, config) - - H, W = input_mesh.graph['H'], input_mesh.graph['W'] - input_mesh = tear_edges(input_mesh, config['depth_threshold'], xy2depth) - input_mesh, info_on_pix = generate_init_node(input_mesh, config, min_node_in_cc=200) - edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=False) - edge_canvas = np.zeros((H, W)) - 1 - - input_mesh, info_on_pix, depth = reassign_floating_island(input_mesh, info_on_pix, image, depth) - input_mesh = update_status(input_mesh, info_on_pix) - specific_edge_id = [] - edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True) - pre_depth = depth.copy() - input_mesh, info_on_pix, edge_mesh, depth, aft_mark = remove_dangling(input_mesh, edge_ccs, edge_mesh, info_on_pix, image, depth, config) - - input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth) - edge_ccs, input_mesh, 
edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True) - edge_canvas = np.zeros((H, W)) - 1 - - mesh, info_on_pix, depth = fill_missing_node(input_mesh, info_on_pix, image, depth) - if config['extrapolate_border'] is True: - pre_depth = depth.copy() - input_mesh, info_on_pix, depth = refresh_bord_depth(input_mesh, info_on_pix, image, depth) - input_mesh = remove_node_feat(input_mesh, 'edge_id') - aft_depth = depth.copy() - input_mesh, info_on_pix, depth, image = enlarge_border(input_mesh, info_on_pix, depth, image, config) - noext_H, noext_W = H, W - H, W = image.shape[:2] - input_mesh, info_on_pix = fill_dummy_bord(input_mesh, info_on_pix, image, depth, config) - edge_ccs, input_mesh, edge_mesh = \ - group_edges(input_mesh, config, image, remove_conflict_ordinal=True) - input_mesh = combine_end_node(input_mesh, edge_mesh, edge_ccs, depth) - input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth) - edge_ccs, input_mesh, edge_mesh = \ - group_edges(input_mesh, config, image, remove_conflict_ordinal=True, spdb=False) - input_mesh = remove_redundant_edge(input_mesh, edge_mesh, edge_ccs, info_on_pix, config, redundant_number=config['redundant_number'], spdb=False) - input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth) - edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True) - input_mesh = combine_end_node(input_mesh, edge_mesh, edge_ccs, depth) - input_mesh = remove_redundant_edge(input_mesh, edge_mesh, edge_ccs, info_on_pix, config, redundant_number=config['redundant_number'], invalid=True, spdb=False) - input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth) - edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True) - input_mesh = combine_end_node(input_mesh, edge_mesh, edge_ccs, depth) - input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth) - edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True) - edge_condition = lambda x, m: m.nodes[x].get('far') is not None and len(m.nodes[x].get('far')) > 0 - edge_map = get_map_from_ccs(edge_ccs, input_mesh.graph['H'], input_mesh.graph['W'], input_mesh, edge_condition) - other_edge_with_id = get_map_from_ccs(edge_ccs, input_mesh.graph['H'], input_mesh.graph['W'], real_id=True) - info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs, - depth_edge_model, depth_feat_model, rgb_model, config, direc="up") - info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs, - depth_edge_model, depth_feat_model, rgb_model, config, direc="left") - info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs, - depth_edge_model, depth_feat_model, rgb_model, config, direc="down") - info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs, - depth_edge_model, depth_feat_model, rgb_model, config, direc="right") - info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs, - depth_edge_model, depth_feat_model, rgb_model, config, direc="right-up") - info_on_pix, input_mesh, image, depth, edge_ccs = 
extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs, - depth_edge_model, depth_feat_model, rgb_model, config, direc="right-down") - info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs, - depth_edge_model, depth_feat_model, rgb_model, config, direc="left-up") - info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs, - depth_edge_model, depth_feat_model, rgb_model, config, direc="left-down") - specific_edge_loc = None - specific_edge_id = [] - vis_edge_id = None - context_ccs, mask_ccs, broken_mask_ccs, edge_ccs, erode_context_ccs, \ - init_mask_connect, edge_maps, extend_context_ccs, extend_edge_ccs, extend_erode_context_ccs = \ - context_and_holes(input_mesh, - edge_ccs, - config, - specific_edge_id, - specific_edge_loc, - depth_feat_model, - inpaint_iter=0, - vis_edge_id=vis_edge_id) - edge_canvas = np.zeros((H, W)) - mask = np.zeros((H, W)) - context = np.zeros((H, W)) - vis_edge_ccs = filter_edge(input_mesh, edge_ccs, config) - edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - 1 - specific_edge_loc = None - FG_edge_maps = edge_maps.copy() - edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - 1 - # for cc_id, cc in enumerate(edge_ccs): - # for node in cc: - # edge_canvas[node[0], node[1]] = cc_id - # f, ((ax0, ax1, ax2)) = plt.subplots(1, 3, sharex=True, sharey=True); ax0.imshow(1./depth); ax1.imshow(image); ax2.imshow(edge_canvas); plt.show() - input_mesh, info_on_pix, specific_edge_nodes, new_edge_ccs, connect_points_ccs, image = DL_inpaint_edge(input_mesh, - info_on_pix, - config, - image, - depth, - context_ccs, - erode_context_ccs, - extend_context_ccs, - extend_erode_context_ccs, - mask_ccs, - broken_mask_ccs, - edge_ccs, - extend_edge_ccs, - init_mask_connect, - edge_maps, - rgb_model, - depth_edge_model, - depth_edge_model_init, - depth_feat_model, - specific_edge_id, - specific_edge_loc, - inpaint_iter=0) - specific_edge_id = [] - edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - connect_points_ccs = [set() for _ in connect_points_ccs] - context_ccs, mask_ccs, broken_mask_ccs, edge_ccs, erode_context_ccs, init_mask_connect, \ - edge_maps, extend_context_ccs, extend_edge_ccs, extend_erode_context_ccs = \ - context_and_holes(input_mesh, new_edge_ccs, config, specific_edge_id, specific_edge_loc, depth_feat_model, connect_points_ccs, inpaint_iter=1) - mask_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - context_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - erode_context_ccs_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - # edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - 1 - # for cc_id, cc in enumerate(edge_ccs): - # for node in cc: - # edge_canvas[node[0], node[1]] = cc_id - specific_edge_id = [] - input_mesh, info_on_pix, specific_edge_nodes, new_edge_ccs, _, image = DL_inpaint_edge(input_mesh, - info_on_pix, - config, - image, - depth, - context_ccs, - erode_context_ccs, - extend_context_ccs, - extend_erode_context_ccs, - mask_ccs, - broken_mask_ccs, - edge_ccs, - extend_edge_ccs, - init_mask_connect, - edge_maps, - rgb_model, - depth_edge_model, - depth_edge_model_init, - depth_feat_model, - specific_edge_id, - specific_edge_loc, - inpaint_iter=1) - 
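# The vertex-serialization loop a little further below calls reproject_3d_int_detail,
# which is imported from the project's utils and not shown in this diff. As a hedged
# stand-in only: under a plain pinhole model, with k_00/k_02/k_11/k_12 taken from the
# inverse intrinsic matrix K^-1 and w_offset/h_offset presumably undoing the earlier
# border extension, the helper plausibly does something like the sketch here. Which
# inverse-intrinsic entry pairs with which image axis, and any sign conventions, are
# assumptions of this sketch, not verified against the real implementation.
def reproject_pinhole_sketch(y, x, depth, k_00, k_02, k_11, k_12, w_offset, h_offset):
    # Shift back to the original (un-extended) pixel grid.
    u = x - w_offset
    v = y - h_offset
    # K^-1 @ [u, v, 1]^T scaled by depth gives the 3D point in camera space.
    return [(k_00 * u + k_02) * depth, (k_11 * v + k_12) * depth, depth]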
vertex_id = 0 - input_mesh.graph['H'], input_mesh.graph['W'] = input_mesh.graph['noext_H'], input_mesh.graph['noext_W'] - background_canvas = np.zeros((input_mesh.graph['H'], - input_mesh.graph['W'], - 3)) - ply_flag = config.get('save_ply') - if ply_flag is True: - node_str_list = [] - else: - node_str_color = [] - node_str_point = [] - out_fmt = lambda x, x_flag: str(x) if x_flag is True else x - point_time = 0 - hlight_time = 0 - cur_id_time = 0 - node_str_time = 0 - generate_face_time = 0 - point_list = [] - k_00, k_02, k_11, k_12 = \ - input_mesh.graph['cam_param_pix_inv'][0, 0], input_mesh.graph['cam_param_pix_inv'][0, 2], \ - input_mesh.graph['cam_param_pix_inv'][1, 1], input_mesh.graph['cam_param_pix_inv'][1, 2] - w_offset = input_mesh.graph['woffset'] - h_offset = input_mesh.graph['hoffset'] - for pix_xy, pix_list in info_on_pix.items(): - for pix_idx, pix_info in enumerate(pix_list): - pix_depth = pix_info['depth'] if pix_info.get('real_depth') is None else pix_info['real_depth'] - str_pt = [out_fmt(x, ply_flag) for x in reproject_3d_int_detail(pix_xy[0], pix_xy[1], pix_depth, - k_00, k_02, k_11, k_12, w_offset, h_offset)] - if input_mesh.has_node((pix_xy[0], pix_xy[1], pix_info['depth'])) is False: - return False - continue - if pix_info.get('overlap_number') is not None: - str_color = [out_fmt(x, ply_flag) for x in (pix_info['color']/pix_info['overlap_number']).astype(np.uint8).tolist()] - else: - str_color = [out_fmt(x, ply_flag) for x in pix_info['color'].tolist()] - if pix_info.get('edge_occlusion') is True: - str_color.append(out_fmt(4, ply_flag)) - else: - if pix_info.get('inpaint_id') is None: - str_color.append(out_fmt(1, ply_flag)) - else: - str_color.append(out_fmt(pix_info.get('inpaint_id') + 1, ply_flag)) - if pix_info.get('modified_border') is True or pix_info.get('ext_pixel') is True: - if len(str_color) == 4: - str_color[-1] = out_fmt(5, ply_flag) - else: - str_color.append(out_fmt(5, ply_flag)) - pix_info['cur_id'] = vertex_id - input_mesh.nodes[(pix_xy[0], pix_xy[1], pix_info['depth'])]['cur_id'] = out_fmt(vertex_id, ply_flag) - vertex_id += 1 - if ply_flag is True: - node_str_list.append(' '.join(str_pt) + ' ' + ' '.join(str_color) + '\n') - else: - node_str_color.append(str_color) - node_str_point.append(str_pt) - str_faces = generate_face(input_mesh, info_on_pix, config) - if config['save_ply'] is True: - print("Writing mesh file %s ..." 
% ply_name) - with open(ply_name, 'w') as ply_fi: - ply_fi.write('ply\n' + 'format ascii 1.0\n') - ply_fi.write('comment H ' + str(int(input_mesh.graph['H'])) + '\n') - ply_fi.write('comment W ' + str(int(input_mesh.graph['W'])) + '\n') - ply_fi.write('comment hFov ' + str(float(input_mesh.graph['hFov'])) + '\n') - ply_fi.write('comment vFov ' + str(float(input_mesh.graph['vFov'])) + '\n') - ply_fi.write('element vertex ' + str(len(node_str_list)) + '\n') - ply_fi.write('property float x\n' + \ - 'property float y\n' + \ - 'property float z\n' + \ - 'property uchar red\n' + \ - 'property uchar green\n' + \ - 'property uchar blue\n' + \ - 'property uchar alpha\n') - ply_fi.write('element face ' + str(len(str_faces)) + '\n') - ply_fi.write('property list uchar int vertex_index\n') - ply_fi.write('end_header\n') - ply_fi.writelines(node_str_list) - ply_fi.writelines(str_faces) - ply_fi.close() - return input_mesh - else: - H = int(input_mesh.graph['H']) - W = int(input_mesh.graph['W']) - hFov = input_mesh.graph['hFov'] - vFov = input_mesh.graph['vFov'] - node_str_color = np.array(node_str_color).astype(np.float32) - node_str_color[..., :3] = node_str_color[..., :3] / 255. - node_str_point = np.array(node_str_point) - str_faces = np.array(str_faces) - - return node_str_point, node_str_color, str_faces, H, W, hFov, vFov - -def read_ply(mesh_fi): - ply_fi = open(mesh_fi, 'r') - Height = None - Width = None - hFov = None - vFov = None - while True: - line = ply_fi.readline().split('\n')[0] - if line.startswith('element vertex'): - num_vertex = int(line.split(' ')[-1]) - elif line.startswith('element face'): - num_face = int(line.split(' ')[-1]) - elif line.startswith('comment'): - if line.split(' ')[1] == 'H': - Height = int(line.split(' ')[-1].split('\n')[0]) - if line.split(' ')[1] == 'W': - Width = int(line.split(' ')[-1].split('\n')[0]) - if line.split(' ')[1] == 'hFov': - hFov = float(line.split(' ')[-1].split('\n')[0]) - if line.split(' ')[1] == 'vFov': - vFov = float(line.split(' ')[-1].split('\n')[0]) - elif line.startswith('end_header'): - break - contents = ply_fi.readlines() - vertex_infos = contents[:num_vertex] - face_infos = contents[num_vertex:] - verts = [] - colors = [] - faces = [] - for v_info in vertex_infos: - str_info = [float(v) for v in v_info.split('\n')[0].split(' ')] - if len(str_info) == 6: - vx, vy, vz, r, g, b = str_info - else: - vx, vy, vz, r, g, b, hi = str_info - verts.append([vx, vy, vz]) - colors.append([r, g, b, hi]) - verts = np.array(verts) - try: - colors = np.array(colors) - colors[..., :3] = colors[..., :3]/255. 
- except: - import pdb - pdb.set_trace() - - for f_info in face_infos: - _, v1, v2, v3 = [int(f) for f in f_info.split('\n')[0].split(' ')] - faces.append([v1, v2, v3]) - faces = np.array(faces) - - - return verts, colors, faces, Height, Width, hFov, vFov - - -class Canvas_view(): - def __init__(self, - fov, - verts, - faces, - colors, - canvas_size, - factor=1, - bgcolor='gray', - proj='perspective', - ): - self.canvas = scene.SceneCanvas(bgcolor=bgcolor, size=(canvas_size*factor, canvas_size*factor)) - self.view = self.canvas.central_widget.add_view() - self.view.camera = 'perspective' - self.view.camera.fov = fov - self.mesh = visuals.Mesh(shading=None) - self.mesh.attach(Alpha(1.0)) - self.view.add(self.mesh) - self.tr = self.view.camera.transform - self.mesh.set_data(vertices=verts, faces=faces, vertex_colors=colors[:, :3]) - self.translate([0,0,0]) - self.rotate(axis=[1,0,0], angle=180) - self.view_changed() - - def translate(self, trans=[0,0,0]): - self.tr.translate(trans) - - def rotate(self, axis=[1,0,0], angle=0): - self.tr.rotate(axis=axis, angle=angle) - - def view_changed(self): - self.view.camera.view_changed() - - def render(self): - return self.canvas.render() - - def reinit_mesh(self, verts, faces, colors): - self.mesh.set_data(vertices=verts, faces=faces, vertex_colors=colors[:, :3]) - - def reinit_camera(self, fov): - self.view.camera.fov = fov - self.view.camera.view_changed() - - -def output_3d_photo(verts, colors, faces, Height, Width, hFov, vFov, tgt_poses, video_traj_types, ref_pose, - output_dir, ref_image, int_mtx, config, image, videos_poses, video_basename, original_H=None, original_W=None, - border=None, depth=None, normal_canvas=None, all_canvas=None, mean_loc_depth=None): - - cam_mesh = netx.Graph() - cam_mesh.graph['H'] = Height - cam_mesh.graph['W'] = Width - cam_mesh.graph['original_H'] = original_H - cam_mesh.graph['original_W'] = original_W - int_mtx_real_x = int_mtx[0] * Width - int_mtx_real_y = int_mtx[1] * Height - cam_mesh.graph['hFov'] = 2 * np.arctan((1. / 2.) * ((cam_mesh.graph['original_W']) / int_mtx_real_x[0])) - cam_mesh.graph['vFov'] = 2 * np.arctan((1. / 2.) 
* ((cam_mesh.graph['original_H']) / int_mtx_real_y[1])) - colors = colors[..., :3] - - fov_in_rad = max(cam_mesh.graph['vFov'], cam_mesh.graph['hFov']) - fov = (fov_in_rad * 180 / np.pi) - print("fov: " + str(fov)) - init_factor = 1 - if config.get('anti_flickering') is True: - init_factor = 3 - if (cam_mesh.graph['original_H'] is not None) and (cam_mesh.graph['original_W'] is not None): - canvas_w = cam_mesh.graph['original_W'] - canvas_h = cam_mesh.graph['original_H'] - else: - canvas_w = cam_mesh.graph['W'] - canvas_h = cam_mesh.graph['H'] - canvas_size = max(canvas_h, canvas_w) - if normal_canvas is None: - normal_canvas = Canvas_view(fov, - verts, - faces, - colors, - canvas_size=canvas_size, - factor=init_factor, - bgcolor='gray', - proj='perspective') - else: - normal_canvas.reinit_mesh(verts, faces, colors) - normal_canvas.reinit_camera(fov) - img = normal_canvas.render() - backup_img, backup_all_img, all_img_wo_bound = img.copy(), img.copy() * 0, img.copy() * 0 - img = cv2.resize(img, (int(img.shape[1] / init_factor), int(img.shape[0] / init_factor)), interpolation=cv2.INTER_AREA) - if border is None: - border = [0, img.shape[0], 0, img.shape[1]] - H, W = cam_mesh.graph['H'], cam_mesh.graph['W'] - if (cam_mesh.graph['original_H'] is not None) and (cam_mesh.graph['original_W'] is not None): - aspect_ratio = cam_mesh.graph['original_H'] / cam_mesh.graph['original_W'] - else: - aspect_ratio = cam_mesh.graph['H'] / cam_mesh.graph['W'] - if aspect_ratio > 1: - img_h_len = cam_mesh.graph['H'] if cam_mesh.graph.get('original_H') is None else cam_mesh.graph['original_H'] - img_w_len = img_h_len / aspect_ratio - anchor = [0, - img.shape[0], - int(max(0, int((img.shape[1])//2 - img_w_len//2))), - int(min(int((img.shape[1])//2 + img_w_len//2), (img.shape[1])-1))] - elif aspect_ratio <= 1: - img_w_len = cam_mesh.graph['W'] if cam_mesh.graph.get('original_W') is None else cam_mesh.graph['original_W'] - img_h_len = img_w_len * aspect_ratio - anchor = [int(max(0, int((img.shape[0])//2 - img_h_len//2))), - int(min(int((img.shape[0])//2 + img_h_len//2), (img.shape[0])-1)), - 0, - img.shape[1]] - anchor = np.array(anchor) - plane_width = np.tan(fov_in_rad/2.) * np.abs(mean_loc_depth) - for video_pose, video_traj_type in zip(videos_poses, video_traj_types): - stereos = [] - tops = []; buttoms = []; lefts = []; rights = [] - for tp_id, tp in enumerate(video_pose): - rel_pose = np.linalg.inv(np.dot(tp, np.linalg.inv(ref_pose))) - axis, angle = transforms3d.axangles.mat2axangle(rel_pose[0:3, 0:3]) - normal_canvas.rotate(axis=axis, angle=(angle*180)/np.pi) - normal_canvas.translate(rel_pose[:3,3]) - new_mean_loc_depth = mean_loc_depth - float(rel_pose[2, 3]) - if 'dolly' in video_traj_type: - new_fov = float((np.arctan2(plane_width, np.array([np.abs(new_mean_loc_depth)])) * 180. 
/ np.pi) * 2) - normal_canvas.reinit_camera(new_fov) - else: - normal_canvas.reinit_camera(fov) - normal_canvas.view_changed() - img = normal_canvas.render() - img = cv2.GaussianBlur(img,(int(init_factor//2 * 2 + 1), int(init_factor//2 * 2 + 1)), 0) - img = cv2.resize(img, (int(img.shape[1] / init_factor), int(img.shape[0] / init_factor)), interpolation=cv2.INTER_AREA) - img = img[anchor[0]:anchor[1], anchor[2]:anchor[3]] - img = img[int(border[0]):int(border[1]), int(border[2]):int(border[3])] - - if any(np.array(config['crop_border']) > 0.0): - H_c, W_c, _ = img.shape - o_t = int(H_c * config['crop_border'][0]) - o_l = int(W_c * config['crop_border'][1]) - o_b = int(H_c * config['crop_border'][2]) - o_r = int(W_c * config['crop_border'][3]) - img = img[o_t:H_c-o_b, o_l:W_c-o_r] - img = cv2.resize(img, (W_c, H_c), interpolation=cv2.INTER_CUBIC) - - """ - img = cv2.resize(img, (int(img.shape[1] / init_factor), int(img.shape[0] / init_factor)), interpolation=cv2.INTER_CUBIC) - img = img[anchor[0]:anchor[1], anchor[2]:anchor[3]] - img = img[int(border[0]):int(border[1]), int(border[2]):int(border[3])] - - if config['crop_border'] is True: - top, buttom, left, right = find_largest_rect(img, bg_color=(128, 128, 128)) - tops.append(top); buttoms.append(buttom); lefts.append(left); rights.append(right) - """ - stereos.append(img[..., :3]) - normal_canvas.translate(-rel_pose[:3,3]) - normal_canvas.rotate(axis=axis, angle=-(angle*180)/np.pi) - normal_canvas.view_changed() - """ - if config['crop_border'] is True: - atop, abuttom = min(max(tops), img.shape[0]//2 - 10), max(min(buttoms), img.shape[0]//2 + 10) - aleft, aright = min(max(lefts), img.shape[1]//2 - 10), max(min(rights), img.shape[1]//2 + 10) - atop -= atop % 2; abuttom -= abuttom % 2; aleft -= aleft % 2; aright -= aright % 2 - else: - atop = 0; abuttom = img.shape[0] - img.shape[0] % 2; aleft = 0; aright = img.shape[1] - img.shape[1] % 2 - """ - atop = 0; abuttom = img.shape[0] - img.shape[0] % 2; aleft = 0; aright = img.shape[1] - img.shape[1] % 2 - crop_stereos = [] - for stereo in stereos: - crop_stereos.append((stereo[atop:abuttom, aleft:aright, :3] * 1).astype(np.uint8)) - stereos = crop_stereos - clip = ImageSequenceClip(stereos, fps=config['fps']) - if isinstance(video_basename, list): - video_basename = video_basename[0] - clip.write_videofile(os.path.join(output_dir, video_basename + '_' + video_traj_type + '.mp4'), fps=config['fps']) - - - - return normal_canvas, all_canvas diff --git a/spaces/KarmKarma/rvc-models-genshinimpact/infer_pack/attentions.py b/spaces/KarmKarma/rvc-models-genshinimpact/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/KarmKarma/rvc-models-genshinimpact/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - 
self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = 
block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." 
- block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/KazeDevID/RVC-Model/infer_pack/attentions.py b/spaces/KazeDevID/RVC-Model/infer_pack/attentions.py deleted file mode 100644 index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000 --- a/spaces/KazeDevID/RVC-Model/infer_pack/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -from infer_pack import commons -from infer_pack import modules -from infer_pack.modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=10, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y 
= self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - 
if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." - block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. 
- pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/KenjieDec/RemBG/rembg/commands/s_command.py b/spaces/KenjieDec/RemBG/rembg/commands/s_command.py deleted file mode 100644 index 4fba1ce3e71e0e55fd2d9d81a4b35b5637c1be37..0000000000000000000000000000000000000000 --- a/spaces/KenjieDec/RemBG/rembg/commands/s_command.py +++ /dev/null @@ -1,285 +0,0 @@ -import json -import os -import webbrowser -from typing import Optional, Tuple, cast - -import aiohttp -import click -import gradio as gr -import uvicorn -from asyncer import asyncify -from fastapi import Depends, FastAPI, File, Form, Query -from fastapi.middleware.cors import CORSMiddleware -from starlette.responses import Response - -from .._version import get_versions -from ..bg import remove -from ..session_factory import new_session -from ..sessions import sessions_names -from ..sessions.base import BaseSession - - -@click.command( - name="s", - help="for a http server", -) -@click.option( - "-p", - "--port", - default=5000, - type=int, - show_default=True, - help="port", -) -@click.option( - "-l", - "--log_level", - default="info", - type=str, - show_default=True, - help="log level", -) -@click.option( - "-t", - "--threads", - default=None, - type=int, - show_default=True, - help="number of worker threads", -) -def s_command(port: int, log_level: str, threads: int) -> None: - sessions: dict[str, BaseSession] = {} - tags_metadata = [ - { - "name": "Background Removal", - "description": "Endpoints that perform background removal with different image sources.", - "externalDocs": { - "description": "GitHub Source", - "url": "https://github.com/danielgatis/rembg", - }, - }, - ] - app = FastAPI( - title="Rembg", - description="Rembg is a tool to remove images background. 
That is it.", - version=get_versions()["version"], - contact={ - "name": "Daniel Gatis", - "url": "https://github.com/danielgatis", - "email": "danielgatis@gmail.com", - }, - license_info={ - "name": "MIT License", - "url": "https://github.com/danielgatis/rembg/blob/main/LICENSE.txt", - }, - openapi_tags=tags_metadata, - docs_url="/api", - ) - - app.add_middleware( - CORSMiddleware, - allow_credentials=True, - allow_origins=["*"], - allow_methods=["*"], - allow_headers=["*"], - ) - - class CommonQueryParams: - def __init__( - self, - model: str = Query( - description="Model to use when processing image", - regex=r"(" + "|".join(sessions_names) + ")", - default="u2net", - ), - a: bool = Query(default=False, description="Enable Alpha Matting"), - af: int = Query( - default=240, - ge=0, - le=255, - description="Alpha Matting (Foreground Threshold)", - ), - ab: int = Query( - default=10, - ge=0, - le=255, - description="Alpha Matting (Background Threshold)", - ), - ae: int = Query( - default=10, ge=0, description="Alpha Matting (Erode Structure Size)" - ), - om: bool = Query(default=False, description="Only Mask"), - ppm: bool = Query(default=False, description="Post Process Mask"), - bgc: Optional[str] = Query(default=None, description="Background Color"), - extras: Optional[str] = Query( - default=None, description="Extra parameters as JSON" - ), - ): - self.model = model - self.a = a - self.af = af - self.ab = ab - self.ae = ae - self.om = om - self.ppm = ppm - self.extras = extras - self.bgc = ( - cast(Tuple[int, int, int, int], tuple(map(int, bgc.split(",")))) - if bgc - else None - ) - - class CommonQueryPostParams: - def __init__( - self, - model: str = Form( - description="Model to use when processing image", - regex=r"(" + "|".join(sessions_names) + ")", - default="u2net", - ), - a: bool = Form(default=False, description="Enable Alpha Matting"), - af: int = Form( - default=240, - ge=0, - le=255, - description="Alpha Matting (Foreground Threshold)", - ), - ab: int = Form( - default=10, - ge=0, - le=255, - description="Alpha Matting (Background Threshold)", - ), - ae: int = Form( - default=10, ge=0, description="Alpha Matting (Erode Structure Size)" - ), - om: bool = Form(default=False, description="Only Mask"), - ppm: bool = Form(default=False, description="Post Process Mask"), - bgc: Optional[str] = Query(default=None, description="Background Color"), - extras: Optional[str] = Query( - default=None, description="Extra parameters as JSON" - ), - ): - self.model = model - self.a = a - self.af = af - self.ab = ab - self.ae = ae - self.om = om - self.ppm = ppm - self.extras = extras - self.bgc = ( - cast(Tuple[int, int, int, int], tuple(map(int, bgc.split(",")))) - if bgc - else None - ) - - def im_without_bg(content: bytes, commons: CommonQueryParams) -> Response: - kwargs = {} - - if commons.extras: - try: - kwargs.update(json.loads(commons.extras)) - except Exception: - pass - - return Response( - remove( - content, - session=sessions.setdefault(commons.model, new_session(commons.model)), - alpha_matting=commons.a, - alpha_matting_foreground_threshold=commons.af, - alpha_matting_background_threshold=commons.ab, - alpha_matting_erode_size=commons.ae, - only_mask=commons.om, - post_process_mask=commons.ppm, - bgcolor=commons.bgc, - **kwargs, - ), - media_type="image/png", - ) - - @app.on_event("startup") - def startup(): - try: - webbrowser.open(f"http://localhost:{port}") - except Exception: - pass - - if threads is not None: - from anyio import CapacityLimiter - from anyio.lowlevel 
import RunVar - - RunVar("_default_thread_limiter").set(CapacityLimiter(threads)) - - @app.get( - path="/api/remove", - tags=["Background Removal"], - summary="Remove from URL", - description="Removes the background from an image obtained by retrieving an URL.", - ) - async def get_index( - url: str = Query( - default=..., description="URL of the image that has to be processed." - ), - commons: CommonQueryParams = Depends(), - ): - async with aiohttp.ClientSession() as session: - async with session.get(url) as response: - file = await response.read() - return await asyncify(im_without_bg)(file, commons) - - @app.post( - path="/api/remove", - tags=["Background Removal"], - summary="Remove from Stream", - description="Removes the background from an image sent within the request itself.", - ) - async def post_index( - file: bytes = File( - default=..., - description="Image file (byte stream) that has to be processed.", - ), - commons: CommonQueryPostParams = Depends(), - ): - return await asyncify(im_without_bg)(file, commons) # type: ignore - - def gr_app(app): - def inference(input_path, model): - output_path = "output.png" - with open(input_path, "rb") as i: - with open(output_path, "wb") as o: - input = i.read() - output = remove(input, session=new_session(model)) - o.write(output) - return os.path.join(output_path) - - interface = gr.Interface( - inference, - [ - gr.components.Image(type="filepath", label="Input"), - gr.components.Dropdown( - [ - "u2net", - "u2netp", - "u2net_human_seg", - "u2net_cloth_seg", - "silueta", - "isnet-general-use", - "isnet-anime", - ], - value="u2net", - label="Models", - ), - ], - gr.components.Image(type="filepath", label="Output"), - ) - - interface.queue(concurrency_count=3) - app = gr.mount_gradio_app(app, interface, path="/") - return app - - print(f"To access the API documentation, go to http://localhost:{port}/api") - print(f"To access the UI, go to http://localhost:{port}") - - uvicorn.run(gr_app(app), host="0.0.0.0", port=port, log_level=log_level) diff --git a/spaces/KenjieDec/RemBG/rembg/session_cloth.py b/spaces/KenjieDec/RemBG/rembg/session_cloth.py deleted file mode 100644 index 11bcef74378be4d64058772c29ac45240f60a85b..0000000000000000000000000000000000000000 --- a/spaces/KenjieDec/RemBG/rembg/session_cloth.py +++ /dev/null @@ -1,88 +0,0 @@ -from typing import List - -import numpy as np -from PIL import Image -from PIL.Image import Image as PILImage -from scipy.special import log_softmax - -from .session_base import BaseSession - -pallete1 = [ - 0, - 0, - 0, - 255, - 255, - 255, - 0, - 0, - 0, - 0, - 0, - 0, -] - -pallete2 = [ - 0, - 0, - 0, - 0, - 0, - 0, - 255, - 255, - 255, - 0, - 0, - 0, -] - -pallete3 = [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 255, - 255, - 255, -] - - -class ClothSession(BaseSession): - def predict(self, img: PILImage) -> List[PILImage]: - ort_outs = self.inner_session.run( - None, self.normalize(img, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (768, 768)) - ) - - pred = ort_outs - pred = log_softmax(pred[0], 1) - pred = np.argmax(pred, axis=1, keepdims=True) - pred = np.squeeze(pred, 0) - pred = np.squeeze(pred, 0) - - mask = Image.fromarray(pred.astype("uint8"), mode="L") - mask = mask.resize(img.size, Image.LANCZOS) - - masks = [] - - mask1 = mask.copy() - mask1.putpalette(pallete1) - mask1 = mask1.convert("RGB").convert("L") - masks.append(mask1) - - mask2 = mask.copy() - mask2.putpalette(pallete2) - mask2 = mask2.convert("RGB").convert("L") - masks.append(mask2) - - mask3 = mask.copy() - mask3.putpalette(pallete3) - 
mask3 = mask3.convert("RGB").convert("L") - masks.append(mask3) - - return masks diff --git a/spaces/Kevin676/ChatGPT-with-Smooth-Voice-1.0/app.py b/spaces/Kevin676/ChatGPT-with-Smooth-Voice-1.0/app.py deleted file mode 100644 index 5836efeec57bd61e4c07db59711d18d2ca380d9d..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Smooth-Voice-1.0/app.py +++ /dev/null @@ -1,123 +0,0 @@ -from TTS.api import TTS -tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=True) -import whisper -model = whisper.load_model("small") -import os -os.system('pip install voicefixer --upgrade') -from voicefixer import VoiceFixer -voicefixer = VoiceFixer() -import gradio as gr -import openai -import torch -import torchaudio -from speechbrain.pretrained import SpectralMaskEnhancement - -enhance_model = SpectralMaskEnhancement.from_hparams( -source="speechbrain/metricgan-plus-voicebank", -savedir="pretrained_models/metricgan-plus-voicebank", -run_opts={"device":"cuda"}, -) - -mes1 = [ - {"role": "system", "content": "You are a TOEFL examiner. Help me improve my oral Englsih and give me feedback."} -] - -mes2 = [ - {"role": "system", "content": "You are a mental health therapist. Your name is Tina."} -] - -mes3 = [ - {"role": "system", "content": "You are my personal assistant. Your name is Alice."} -] - -res = [] - -def transcribe(apikey, upload, audio, choice1): - - openai.api_key = apikey - - # load audio and pad/trim it to fit 30 seconds - audio = whisper.load_audio(audio) - audio = whisper.pad_or_trim(audio) - - # make log-Mel spectrogram and move to the same device as the model - mel = whisper.log_mel_spectrogram(audio).to(model.device) - - # detect the spoken language - _, probs = model.detect_language(mel) - print(f"Detected language: {max(probs, key=probs.get)}") - - # decode the audio - options = whisper.DecodingOptions() - result = whisper.decode(model, mel, options) - res.append(result.text) - - if choice1 == "TOEFL": - messages = mes1 - elif choice1 == "Therapist": - messages = mes2 - elif choice1 == "Alice": - messages = mes3 - - # chatgpt - n = len(res) - content = res[n-1] - messages.append({"role": "user", "content": content}) - - completion = openai.ChatCompletion.create( - model = "gpt-3.5-turbo", - messages = messages - ) - - chat_response = completion.choices[0].message.content - - messages.append({"role": "assistant", "content": chat_response}) - - tts.tts_to_file(chat_response, speaker_wav = upload, language="en", file_path="output.wav") - - voicefixer.restore(input="output.wav", # input wav file path - output="audio1.wav", # output wav file path - cuda=True, # whether to use gpu acceleration - mode = 0) # You can try out mode 0, 1, or 2 to find out the best result - - noisy = enhance_model.load_audio( - "audio1.wav" - ).unsqueeze(0) - - enhanced = enhance_model.enhance_batch(noisy, lengths=torch.tensor([1.])) - torchaudio.save("enhanced.wav", enhanced.cpu(), 16000) - - return [result.text, chat_response, "enhanced.wav"] - -c1=gr.Interface( - fn=transcribe, - inputs=[ - gr.Textbox(lines=1, label = "请填写您的OpenAI-API-key"), - gr.Audio(source="upload", label = "请上传您喜欢的声音(wav文件)", type="filepath"), - gr.Audio(source="microphone", label = "和您的专属AI聊天吧!", type="filepath"), - gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"), - ], - outputs=[ - gr.Textbox(label="Speech to Text"), gr.Textbox(label="ChatGPT Output"), gr.Audio(label="Audio with Custom Voice"), - ], - #theme="huggingface", 
- description = "🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!TalktoAI - Enable the future!", - ) - -c2=gr.Interface( - fn=transcribe, - inputs=[ - gr.Textbox(lines=1, label = "请填写您的OpenAI-API-key"), - gr.Audio(source="microphone", label = "请上传您喜欢的声音,并尽量避免噪音", type="filepath"), - gr.Audio(source="microphone", label = "和您的专属AI聊天吧!", type="filepath"), - gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"), - ], - outputs=[ - gr.Textbox(label="Speech to Text"), gr.Textbox(label="ChatGPT Output"), gr.Audio(label="Audio with Custom Voice"), - ], - #theme="huggingface", - description = "🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!TalktoAI - Enable the future!", - ) - -demo = gr.TabbedInterface([c1, c2], ["wav文件上传", "麦克风上传"], title = '🥳💬💕 - TalktoAI,随时随地,谈天说地!') -demo.launch() \ No newline at end of file diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/distribution.py b/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/distribution.py deleted file mode 100644 index d3119a5ba1e77bc25a92d2664f83d366f12399c0..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/distribution.py +++ /dev/null @@ -1,132 +0,0 @@ -import numpy as np -import torch -import torch.nn.functional as F - - -def log_sum_exp(x): - """ numerically stable log_sum_exp implementation that prevents overflow """ - # TF ordering - axis = len(x.size()) - 1 - m, _ = torch.max(x, dim=axis) - m2, _ = torch.max(x, dim=axis, keepdim=True) - return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis)) - - -# It is adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py -def discretized_mix_logistic_loss(y_hat, y, num_classes=65536, - log_scale_min=None, reduce=True): - if log_scale_min is None: - log_scale_min = float(np.log(1e-14)) - y_hat = y_hat.permute(0,2,1) - assert y_hat.dim() == 3 - assert y_hat.size(1) % 3 == 0 - nr_mix = y_hat.size(1) // 3 - - # (B x T x C) - y_hat = y_hat.transpose(1, 2) - - # unpack parameters. (B, T, num_mixtures) x 3 - logit_probs = y_hat[:, :, :nr_mix] - means = y_hat[:, :, nr_mix:2 * nr_mix] - log_scales = torch.clamp(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min=log_scale_min) - - # B x T x 1 -> B x T x num_mixtures - y = y.expand_as(means) - - centered_y = y - means - inv_stdv = torch.exp(-log_scales) - plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1)) - cdf_plus = torch.sigmoid(plus_in) - min_in = inv_stdv * (centered_y - 1. / (num_classes - 1)) - cdf_min = torch.sigmoid(min_in) - - # log probability for edge case of 0 (before scaling) - # equivalent: torch.log(F.sigmoid(plus_in)) - log_cdf_plus = plus_in - F.softplus(plus_in) - - # log probability for edge case of 255 (before scaling) - # equivalent: (1 - F.sigmoid(min_in)).log() - log_one_minus_cdf_min = -F.softplus(min_in) - - # probability for all other cases - cdf_delta = cdf_plus - cdf_min - - mid_in = inv_stdv * centered_y - # log probability in the center of the bin, to be used in extreme cases - # (not actually used in our code) - log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in) - - # tf equivalent - """ - log_probs = tf.where(x < -0.999, log_cdf_plus, - tf.where(x > 0.999, log_one_minus_cdf_min, - tf.where(cdf_delta > 1e-5, - tf.log(tf.maximum(cdf_delta, 1e-12)), - log_pdf_mid - np.log(127.5)))) - """ - # TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value - # for num_classes=65536 case? 1e-7? not sure.. 
- inner_inner_cond = (cdf_delta > 1e-5).float() - - inner_inner_out = inner_inner_cond * \ - torch.log(torch.clamp(cdf_delta, min=1e-12)) + \ - (1. - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2)) - inner_cond = (y > 0.999).float() - inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out - cond = (y < -0.999).float() - log_probs = cond * log_cdf_plus + (1. - cond) * inner_out - - log_probs = log_probs + F.log_softmax(logit_probs, -1) - - if reduce: - return -torch.mean(log_sum_exp(log_probs)) - else: - return -log_sum_exp(log_probs).unsqueeze(-1) - - -def sample_from_discretized_mix_logistic(y, log_scale_min=None): - """ - Sample from discretized mixture of logistic distributions - Args: - y (Tensor): B x C x T - log_scale_min (float): Log scale minimum value - Returns: - Tensor: sample in range of [-1, 1]. - """ - if log_scale_min is None: - log_scale_min = float(np.log(1e-14)) - assert y.size(1) % 3 == 0 - nr_mix = y.size(1) // 3 - - # B x T x C - y = y.transpose(1, 2) - logit_probs = y[:, :, :nr_mix] - - # sample mixture indicator from softmax - temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5) - temp = logit_probs.data - torch.log(- torch.log(temp)) - _, argmax = temp.max(dim=-1) - - # (B, T) -> (B, T, nr_mix) - one_hot = to_one_hot(argmax, nr_mix) - # select logistic parameters - means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1) - log_scales = torch.clamp(torch.sum( - y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min) - # sample from logistic & clip to interval - # we don't actually round to the nearest 8bit value when sampling - u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5) - x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u)) - - x = torch.clamp(torch.clamp(x, min=-1.), max=1.) 
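    # log(u) - log(1. - u) is the logistic quantile (inverse-CDF) transform of the
    # uniform draw u, so x is a sample from the selected Logistic(mean, scale)
    # component; the double clamp keeps the result inside the valid waveform
    # range [-1, 1] before it is returned.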
- - return x - - -def to_one_hot(tensor, n, fill_with=1.): - # we perform one hot encore with respect to the last axis - one_hot = torch.FloatTensor(tensor.size() + (n,)).zero_() - if tensor.is_cuda: - one_hot = one_hot.cuda() - one_hot.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with) - return one_hot diff --git a/spaces/KyanChen/FunSR/datasets/inr_sr_wrappers.py b/spaces/KyanChen/FunSR/datasets/inr_sr_wrappers.py deleted file mode 100644 index 9cd3be7d2f1255eb0cd1788883414550c0ce4298..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/datasets/inr_sr_wrappers.py +++ /dev/null @@ -1,177 +0,0 @@ -import functools -import os -import random -import math -from PIL import Image - -import numpy as np -import torch -from einops import rearrange -from torch.utils.data import Dataset -from torchvision import transforms - -from datasets import register -from utils import to_pixel_samples, to_coordinates - -import torchvision.transforms.functional as TF -import random -from typing import Sequence - - -class MyRotateTransform: - def __init__(self, angles: Sequence[int], p=0.5): - self.angles = angles - self.p = p - - def __call__(self, x): - if torch.rand(1) < self.p: - return x - angle = random.choice(self.angles) - return TF.rotate(x, angle) - -@register('inr_fixed_scale_sr_warp') -class INRFixedScaleSRWarp(Dataset): - def __init__(self, - dataset, scale_ratio, patch_size=48, - augment=False, sample_q=None, - val_mode=False, test_mode=False, - encode_scale_ratio=False, - return_cell=False, # for liff - ): - super(INRFixedScaleSRWarp, self).__init__() - self.dataset = dataset - self.scale_ratio = scale_ratio - self.patch_size = patch_size - self.hr_size = int(patch_size * scale_ratio) - self.augment = augment - self.sample_q = sample_q - self.test_mode = test_mode - self.val_mode = val_mode - self.encode_scale_ratio = encode_scale_ratio - self.return_cell = return_cell - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - # import pdb - # pdb.set_trace() - img_hr, file_name = self.dataset[idx] - class_name = os.path.basename(os.path.dirname(file_name)) - file_name = os.path.basename(file_name).split('.')[0] - # img_hr: 3xHxW - h, w = img_hr.shape[-2:] - # if h < 256 or w < 256: - # img_hr = transforms.Resize(256, Image.BICUBIC)(img_hr) - - if self.test_mode or self.val_mode: - img_hr = transforms.CenterCrop(self.hr_size)(img_hr) - else: - img_hr = transforms.RandomCrop(self.hr_size)(img_hr) - if self.augment: - img_hr = transforms.RandomHorizontalFlip(p=0.5)(img_hr) - img_hr = transforms.RandomVerticalFlip(p=0.5)(img_hr) - img_hr = MyRotateTransform([90, 180, 270], p=0.5)(img_hr) - - img_lr = transforms.Resize(self.patch_size, Image.BICUBIC)(img_hr) - - hr_coord = to_coordinates(size=img_hr.shape[-2:], return_map=False) - hr_rgb = rearrange(img_hr, 'C H W -> (H W) C') - - if self.sample_q is not None and not self.test_mode: - sample_lst = np.random.choice( - len(hr_coord), self.sample_q, replace=False) - hr_coord = hr_coord[sample_lst] - hr_rgb = hr_rgb[sample_lst] - return_dict = { - 'inp': img_lr, - 'coord': hr_coord, - 'gt': hr_rgb, - 'class_name': class_name, - 'filename': file_name - } - - if self.encode_scale_ratio: - scale_ratio = torch.ones_like(hr_coord) * self.patch_size / self.hr_size - return_dict['scale_ratio'] = scale_ratio - - if self.return_cell: - cell = torch.ones_like(hr_coord) - cell[:, 0] *= 2 / img_hr.shape[-2] - cell[:, 1] *= 2 / img_hr.shape[-1] - return_dict['cell'] = cell - - return return_dict - - 
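# A minimal, self-contained sketch of the coordinate/RGB pairing produced by the
# fixed-scale wrapper above. `make_coords` is an assumed stand-in for
# utils.to_coordinates (not shown in this snippet), presumed to return normalized
# pixel-centre coordinates in [-1, 1].
import torch
from einops import rearrange

def make_coords(size):
    h, w = size
    ys = (torch.arange(h, dtype=torch.float32) + 0.5) / h * 2 - 1
    xs = (torch.arange(w, dtype=torch.float32) + 0.5) / w * 2 - 1
    grid = torch.stack(torch.meshgrid(ys, xs, indexing='ij'), dim=-1)
    return grid.reshape(-1, 2)                   # (H*W, 2)

img_hr = torch.rand(3, 96, 96)                   # HR crop for patch_size=48, scale_ratio=2
coord = make_coords(img_hr.shape[-2:])           # (9216, 2) query locations
rgb = rearrange(img_hr, 'C H W -> (H W) C')      # (9216, 3) target colours
keep = torch.randperm(coord.size(0))[:2304]      # sample_q random queries per crop
coord, rgb = coord[keep], rgb[keep]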
-@register('inr_range_scale_sr_warp') -class INRRangeScaleSRWarp(Dataset): - def __init__(self, - dataset, max_scale_ratio, patch_size=48, - augment=False, sample_q=None, - val_mode=False, test_mode=False, - encode_scale_ratio=False, - return_cell=False, # for liff - ): - super(INRRangeScaleSRWarp, self).__init__() - self.dataset = dataset - self.max_scale_ratio = max_scale_ratio - self.patch_size = patch_size - assert max_scale_ratio <= 8 - self.augment = augment - self.sample_q = sample_q - self.test_mode = test_mode - self.val_mode = val_mode - self.encode_scale_ratio = encode_scale_ratio - self.return_cell = return_cell - - def __len__(self): - return len(self.dataset) - - def __getitem__(self, idx): - img_hr, file_name = self.dataset[idx] - class_name = os.path.basename(os.path.dirname(file_name)) - h, w = img_hr.shape[-2:] - # if h < 256 or w < 256: - # img_hr = transforms.Resize(256, Image.BICUBIC)(img_hr) - - hr_size = self.patch_size + self.patch_size * torch.rand([]) * (self.max_scale_ratio - 1) - hr_size = int(hr_size) - - if self.test_mode or self.val_mode: - hr_size = int(self.patch_size * self.max_scale_ratio) - img_hr = transforms.CenterCrop(hr_size)(img_hr) - else: - img_hr = transforms.RandomCrop(hr_size)(img_hr) - if self.augment: - img_hr = transforms.RandomHorizontalFlip(p=0.5)(img_hr) - img_hr = transforms.RandomVerticalFlip(p=0.5)(img_hr) - img_hr = MyRotateTransform([90, 180, 270], p=0.5)(img_hr) - - img_lr = transforms.Resize(self.patch_size, Image.BICUBIC)(img_hr) - - hr_coord = to_coordinates(size=img_hr.shape[-2:], return_map=False) - hr_rgb = rearrange(img_hr, 'C H W -> (H W) C') - - if self.sample_q is not None and not self.test_mode: - sample_lst = np.random.choice( - len(hr_coord), self.sample_q, replace=False) - hr_coord = hr_coord[sample_lst] - hr_rgb = hr_rgb[sample_lst] - return_dict = { - 'inp': img_lr, - 'coord': hr_coord, - 'gt': hr_rgb, - 'class_name': class_name - } - if self.encode_scale_ratio: - scale_ratio = torch.ones_like(hr_coord) * self.patch_size / hr_size - return_dict['scale_ratio'] = scale_ratio - - if self.return_cell: - cell = torch.ones_like(hr_coord) - cell[:, 0] *= 2 / img_hr.shape[-2] - cell[:, 1] *= 2 / img_hr.shape[-1] - return_dict['cell'] = cell - - return return_dict diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/losses/ghm_loss.py b/spaces/KyanChen/RSPrompter/mmdet/models/losses/ghm_loss.py deleted file mode 100644 index a874c0038cc4a77769705a3a06a95a56d3e8dd2d..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/losses/ghm_loss.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from mmdet.registry import MODELS -from .utils import weight_reduce_loss - - -def _expand_onehot_labels(labels, label_weights, label_channels): - bin_labels = labels.new_full((labels.size(0), label_channels), 0) - inds = torch.nonzero( - (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze() - if inds.numel() > 0: - bin_labels[inds, labels[inds]] = 1 - bin_label_weights = label_weights.view(-1, 1).expand( - label_weights.size(0), label_channels) - return bin_labels, bin_label_weights - - -# TODO: code refactoring to make it consistent with other losses -@MODELS.register_module() -class GHMC(nn.Module): - """GHM Classification Loss. - - Details of the theorem can be viewed in the paper - `Gradient Harmonized Single-stage Detector - `_. 
- - Args: - bins (int): Number of the unit regions for distribution calculation. - momentum (float): The parameter for moving average. - use_sigmoid (bool): Can only be true for BCE based loss now. - loss_weight (float): The weight of the total GHM-C loss. - reduction (str): Options are "none", "mean" and "sum". - Defaults to "mean" - """ - - def __init__(self, - bins=10, - momentum=0, - use_sigmoid=True, - loss_weight=1.0, - reduction='mean'): - super(GHMC, self).__init__() - self.bins = bins - self.momentum = momentum - edges = torch.arange(bins + 1).float() / bins - self.register_buffer('edges', edges) - self.edges[-1] += 1e-6 - if momentum > 0: - acc_sum = torch.zeros(bins) - self.register_buffer('acc_sum', acc_sum) - self.use_sigmoid = use_sigmoid - if not self.use_sigmoid: - raise NotImplementedError - self.loss_weight = loss_weight - self.reduction = reduction - - def forward(self, - pred, - target, - label_weight, - reduction_override=None, - **kwargs): - """Calculate the GHM-C loss. - - Args: - pred (float tensor of size [batch_num, class_num]): - The direct prediction of classification fc layer. - target (float tensor of size [batch_num, class_num]): - Binary class target for each sample. - label_weight (float tensor of size [batch_num, class_num]): - the value is 1 if the sample is valid and 0 if ignored. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - Returns: - The gradient harmonized loss. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - # the target should be binary class label - if pred.dim() != target.dim(): - target, label_weight = _expand_onehot_labels( - target, label_weight, pred.size(-1)) - target, label_weight = target.float(), label_weight.float() - edges = self.edges - mmt = self.momentum - weights = torch.zeros_like(pred) - - # gradient length - g = torch.abs(pred.sigmoid().detach() - target) - - valid = label_weight > 0 - tot = max(valid.float().sum().item(), 1.0) - n = 0 # n valid bins - for i in range(self.bins): - inds = (g >= edges[i]) & (g < edges[i + 1]) & valid - num_in_bin = inds.sum().item() - if num_in_bin > 0: - if mmt > 0: - self.acc_sum[i] = mmt * self.acc_sum[i] \ - + (1 - mmt) * num_in_bin - weights[inds] = tot / self.acc_sum[i] - else: - weights[inds] = tot / num_in_bin - n += 1 - if n > 0: - weights = weights / n - - loss = F.binary_cross_entropy_with_logits( - pred, target, reduction='none') - loss = weight_reduce_loss( - loss, weights, reduction=reduction, avg_factor=tot) - return loss * self.loss_weight - - -# TODO: code refactoring to make it consistent with other losses -@MODELS.register_module() -class GHMR(nn.Module): - """GHM Regression Loss. - - Details of the theorem can be viewed in the paper - `Gradient Harmonized Single-stage Detector - `_. - - Args: - mu (float): The parameter for the Authentic Smooth L1 loss. - bins (int): Number of the unit regions for distribution calculation. - momentum (float): The parameter for moving average. - loss_weight (float): The weight of the total GHM-R loss. - reduction (str): Options are "none", "mean" and "sum". 
- Defaults to "mean" - """ - - def __init__(self, - mu=0.02, - bins=10, - momentum=0, - loss_weight=1.0, - reduction='mean'): - super(GHMR, self).__init__() - self.mu = mu - self.bins = bins - edges = torch.arange(bins + 1).float() / bins - self.register_buffer('edges', edges) - self.edges[-1] = 1e3 - self.momentum = momentum - if momentum > 0: - acc_sum = torch.zeros(bins) - self.register_buffer('acc_sum', acc_sum) - self.loss_weight = loss_weight - self.reduction = reduction - - # TODO: support reduction parameter - def forward(self, - pred, - target, - label_weight, - avg_factor=None, - reduction_override=None): - """Calculate the GHM-R loss. - - Args: - pred (float tensor of size [batch_num, 4 (* class_num)]): - The prediction of box regression layer. Channel number can be 4 - or 4 * class_num depending on whether it is class-agnostic. - target (float tensor of size [batch_num, 4 (* class_num)]): - The target regression values with the same size of pred. - label_weight (float tensor of size [batch_num, 4 (* class_num)]): - The weight of each sample, 0 if ignored. - reduction_override (str, optional): The reduction method used to - override the original reduction method of the loss. - Defaults to None. - Returns: - The gradient harmonized loss. - """ - assert reduction_override in (None, 'none', 'mean', 'sum') - reduction = ( - reduction_override if reduction_override else self.reduction) - mu = self.mu - edges = self.edges - mmt = self.momentum - - # ASL1 loss - diff = pred - target - loss = torch.sqrt(diff * diff + mu * mu) - mu - - # gradient length - g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach() - weights = torch.zeros_like(g) - - valid = label_weight > 0 - tot = max(label_weight.float().sum().item(), 1.0) - n = 0 # n: valid bins - for i in range(self.bins): - inds = (g >= edges[i]) & (g < edges[i + 1]) & valid - num_in_bin = inds.sum().item() - if num_in_bin > 0: - n += 1 - if mmt > 0: - self.acc_sum[i] = mmt * self.acc_sum[i] \ - + (1 - mmt) * num_in_bin - weights[inds] = tot / self.acc_sum[i] - else: - weights[inds] = tot / num_in_bin - if n > 0: - weights /= n - loss = weight_reduce_loss( - loss, weights, reduction=reduction, avg_factor=tot) - return loss * self.loss_weight diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/bbox_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/bbox_head.py deleted file mode 100644 index db9377990201d9f59d6d683f6496610d9aebf3f8..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/bbox_head.py +++ /dev/null @@ -1,708 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
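# A minimal sketch of exercising the GHMC / GHMR losses defined in ghm_loss.py above
# with dummy tensors; inside mmdet they are normally instantiated from a config via
# MODELS.build(dict(type='GHMC', ...)) in a detection head, so the direct calls below
# are for illustration only.
import torch
ghmc = GHMC(bins=10, momentum=0.75)              # classification branch
ghmr = GHMR(mu=0.02, bins=10)                    # regression branch
cls_pred = torch.randn(8, 80)                    # raw logits, 8 samples x 80 classes
labels = torch.randint(0, 80, (8,))              # int labels, expanded to one-hot internally
loss_cls = ghmc(cls_pred, labels, torch.ones(8))
box_pred, box_target = torch.randn(8, 4), torch.randn(8, 4)
loss_bbox = ghmr(box_pred, box_target, torch.ones(8, 4))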
-from typing import List, Optional, Tuple, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmengine.config import ConfigDict -from mmengine.model import BaseModule -from mmengine.structures import InstanceData -from torch import Tensor -from torch.nn.modules.utils import _pair - -from mmdet.models.layers import multiclass_nms -from mmdet.models.losses import accuracy -from mmdet.models.task_modules.samplers import SamplingResult -from mmdet.models.utils import empty_instances, multi_apply -from mmdet.registry import MODELS, TASK_UTILS -from mmdet.structures.bbox import get_box_tensor, scale_boxes -from mmdet.utils import ConfigType, InstanceList, OptMultiConfig - - -@MODELS.register_module() -class BBoxHead(BaseModule): - """Simplest RoI head, with only two fc layers for classification and - regression respectively.""" - - def __init__(self, - with_avg_pool: bool = False, - with_cls: bool = True, - with_reg: bool = True, - roi_feat_size: int = 7, - in_channels: int = 256, - num_classes: int = 80, - bbox_coder: ConfigType = dict( - type='DeltaXYWHBBoxCoder', - clip_border=True, - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - predict_box_type: str = 'hbox', - reg_class_agnostic: bool = False, - reg_decoded_bbox: bool = False, - reg_predictor_cfg: ConfigType = dict(type='mmdet.Linear'), - cls_predictor_cfg: ConfigType = dict(type='mmdet.Linear'), - loss_cls: ConfigType = dict( - type='CrossEntropyLoss', - use_sigmoid=False, - loss_weight=1.0), - loss_bbox: ConfigType = dict( - type='SmoothL1Loss', beta=1.0, loss_weight=1.0), - init_cfg: OptMultiConfig = None) -> None: - super().__init__(init_cfg=init_cfg) - assert with_cls or with_reg - self.with_avg_pool = with_avg_pool - self.with_cls = with_cls - self.with_reg = with_reg - self.roi_feat_size = _pair(roi_feat_size) - self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1] - self.in_channels = in_channels - self.num_classes = num_classes - self.predict_box_type = predict_box_type - self.reg_class_agnostic = reg_class_agnostic - self.reg_decoded_bbox = reg_decoded_bbox - self.reg_predictor_cfg = reg_predictor_cfg - self.cls_predictor_cfg = cls_predictor_cfg - - self.bbox_coder = TASK_UTILS.build(bbox_coder) - self.loss_cls = MODELS.build(loss_cls) - self.loss_bbox = MODELS.build(loss_bbox) - - in_channels = self.in_channels - if self.with_avg_pool: - self.avg_pool = nn.AvgPool2d(self.roi_feat_size) - else: - in_channels *= self.roi_feat_area - if self.with_cls: - # need to add background class - if self.custom_cls_channels: - cls_channels = self.loss_cls.get_cls_channels(self.num_classes) - else: - cls_channels = num_classes + 1 - cls_predictor_cfg_ = self.cls_predictor_cfg.copy() - cls_predictor_cfg_.update( - in_features=in_channels, out_features=cls_channels) - self.fc_cls = MODELS.build(cls_predictor_cfg_) - if self.with_reg: - box_dim = self.bbox_coder.encode_size - out_dim_reg = box_dim if reg_class_agnostic else \ - box_dim * num_classes - reg_predictor_cfg_ = self.reg_predictor_cfg.copy() - if isinstance(reg_predictor_cfg_, (dict, ConfigDict)): - reg_predictor_cfg_.update( - in_features=in_channels, out_features=out_dim_reg) - self.fc_reg = MODELS.build(reg_predictor_cfg_) - self.debug_imgs = None - if init_cfg is None: - self.init_cfg = [] - if self.with_cls: - self.init_cfg += [ - dict( - type='Normal', std=0.01, override=dict(name='fc_cls')) - ] - if self.with_reg: - self.init_cfg += [ - dict( - type='Normal', std=0.001, override=dict(name='fc_reg')) - ] - - # 
TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead - @property - def custom_cls_channels(self) -> bool: - """get custom_cls_channels from loss_cls.""" - return getattr(self.loss_cls, 'custom_cls_channels', False) - - # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead - @property - def custom_activation(self) -> bool: - """get custom_activation from loss_cls.""" - return getattr(self.loss_cls, 'custom_activation', False) - - # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead - @property - def custom_accuracy(self) -> bool: - """get custom_accuracy from loss_cls.""" - return getattr(self.loss_cls, 'custom_accuracy', False) - - def forward(self, x: Tuple[Tensor]) -> tuple: - """Forward features from the upstream network. - - Args: - x (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: A tuple of classification scores and bbox prediction. - - - cls_score (Tensor): Classification scores for all - scale levels, each is a 4D-tensor, the channels number - is num_base_priors * num_classes. - - bbox_pred (Tensor): Box energies / deltas for all - scale levels, each is a 4D-tensor, the channels number - is num_base_priors * 4. - """ - if self.with_avg_pool: - if x.numel() > 0: - x = self.avg_pool(x) - x = x.view(x.size(0), -1) - else: - # avg_pool does not support empty tensor, - # so use torch.mean instead it - x = torch.mean(x, dim=(-1, -2)) - cls_score = self.fc_cls(x) if self.with_cls else None - bbox_pred = self.fc_reg(x) if self.with_reg else None - return cls_score, bbox_pred - - def _get_targets_single(self, pos_priors: Tensor, neg_priors: Tensor, - pos_gt_bboxes: Tensor, pos_gt_labels: Tensor, - cfg: ConfigDict) -> tuple: - """Calculate the ground truth for proposals in the single image - according to the sampling results. - - Args: - pos_priors (Tensor): Contains all the positive boxes, - has shape (num_pos, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - neg_priors (Tensor): Contains all the negative boxes, - has shape (num_neg, 4), the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_bboxes (Tensor): Contains gt_boxes for - all positive samples, has shape (num_pos, 4), - the last dimension 4 - represents [tl_x, tl_y, br_x, br_y]. - pos_gt_labels (Tensor): Contains gt_labels for - all positive samples, has shape (num_pos, ). - cfg (obj:`ConfigDict`): `train_cfg` of R-CNN. - - Returns: - Tuple[Tensor]: Ground truth for proposals - in a single image. Containing the following Tensors: - - - labels(Tensor): Gt_labels for all proposals, has - shape (num_proposals,). - - label_weights(Tensor): Labels_weights for all - proposals, has shape (num_proposals,). - - bbox_targets(Tensor):Regression target for all - proposals, has shape (num_proposals, 4), the - last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - - bbox_weights(Tensor):Regression weights for all - proposals, has shape (num_proposals, 4). 
- """ - num_pos = pos_priors.size(0) - num_neg = neg_priors.size(0) - num_samples = num_pos + num_neg - - # original implementation uses new_zeros since BG are set to be 0 - # now use empty & fill because BG cat_id = num_classes, - # FG cat_id = [0, num_classes-1] - labels = pos_priors.new_full((num_samples, ), - self.num_classes, - dtype=torch.long) - reg_dim = pos_gt_bboxes.size(-1) if self.reg_decoded_bbox \ - else self.bbox_coder.encode_size - label_weights = pos_priors.new_zeros(num_samples) - bbox_targets = pos_priors.new_zeros(num_samples, reg_dim) - bbox_weights = pos_priors.new_zeros(num_samples, reg_dim) - if num_pos > 0: - labels[:num_pos] = pos_gt_labels - pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight - label_weights[:num_pos] = pos_weight - if not self.reg_decoded_bbox: - pos_bbox_targets = self.bbox_coder.encode( - pos_priors, pos_gt_bboxes) - else: - # When the regression loss (e.g. `IouLoss`, `GIouLoss`) - # is applied directly on the decoded bounding boxes, both - # the predicted boxes and regression targets should be with - # absolute coordinate format. - pos_bbox_targets = get_box_tensor(pos_gt_bboxes) - bbox_targets[:num_pos, :] = pos_bbox_targets - bbox_weights[:num_pos, :] = 1 - if num_neg > 0: - label_weights[-num_neg:] = 1.0 - - return labels, label_weights, bbox_targets, bbox_weights - - def get_targets(self, - sampling_results: List[SamplingResult], - rcnn_train_cfg: ConfigDict, - concat: bool = True) -> tuple: - """Calculate the ground truth for all samples in a batch according to - the sampling_results. - - Almost the same as the implementation in bbox_head, we passed - additional parameters pos_inds_list and neg_inds_list to - `_get_targets_single` function. - - Args: - sampling_results (List[obj:SamplingResult]): Assign results of - all images in a batch after sampling. - rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. - concat (bool): Whether to concatenate the results of all - the images in a single batch. - - Returns: - Tuple[Tensor]: Ground truth for proposals in a single image. - Containing the following list of Tensors: - - - labels (list[Tensor],Tensor): Gt_labels for all - proposals in a batch, each tensor in list has - shape (num_proposals,) when `concat=False`, otherwise - just a single tensor has shape (num_all_proposals,). - - label_weights (list[Tensor]): Labels_weights for - all proposals in a batch, each tensor in list has - shape (num_proposals,) when `concat=False`, otherwise - just a single tensor has shape (num_all_proposals,). - - bbox_targets (list[Tensor],Tensor): Regression target - for all proposals in a batch, each tensor in list - has shape (num_proposals, 4) when `concat=False`, - otherwise just a single tensor has shape - (num_all_proposals, 4), the last dimension 4 represents - [tl_x, tl_y, br_x, br_y]. - - bbox_weights (list[tensor],Tensor): Regression weights for - all proposals in a batch, each tensor in list has shape - (num_proposals, 4) when `concat=False`, otherwise just a - single tensor has shape (num_all_proposals, 4). 
- """ - pos_priors_list = [res.pos_priors for res in sampling_results] - neg_priors_list = [res.neg_priors for res in sampling_results] - pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results] - pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results] - labels, label_weights, bbox_targets, bbox_weights = multi_apply( - self._get_targets_single, - pos_priors_list, - neg_priors_list, - pos_gt_bboxes_list, - pos_gt_labels_list, - cfg=rcnn_train_cfg) - - if concat: - labels = torch.cat(labels, 0) - label_weights = torch.cat(label_weights, 0) - bbox_targets = torch.cat(bbox_targets, 0) - bbox_weights = torch.cat(bbox_weights, 0) - return labels, label_weights, bbox_targets, bbox_weights - - def loss_and_target(self, - cls_score: Tensor, - bbox_pred: Tensor, - rois: Tensor, - sampling_results: List[SamplingResult], - rcnn_train_cfg: ConfigDict, - concat: bool = True, - reduction_override: Optional[str] = None) -> dict: - """Calculate the loss based on the features extracted by the bbox head. - - Args: - cls_score (Tensor): Classification prediction - results of all class, has shape - (batch_size * num_proposals_single_image, num_classes) - bbox_pred (Tensor): Regression prediction results, - has shape - (batch_size * num_proposals_single_image, 4), the last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. - rois (Tensor): RoIs with the shape - (batch_size * num_proposals_single_image, 5) where the first - column indicates batch id of each RoI. - sampling_results (List[obj:SamplingResult]): Assign results of - all images in a batch after sampling. - rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. - concat (bool): Whether to concatenate the results of all - the images in a single batch. Defaults to True. - reduction_override (str, optional): The reduction - method used to override the original reduction - method of the loss. Options are "none", - "mean" and "sum". Defaults to None, - - Returns: - dict: A dictionary of loss and targets components. - The targets are only used for cascade rcnn. - """ - - cls_reg_targets = self.get_targets( - sampling_results, rcnn_train_cfg, concat=concat) - losses = self.loss( - cls_score, - bbox_pred, - rois, - *cls_reg_targets, - reduction_override=reduction_override) - - # cls_reg_targets is only for cascade rcnn - return dict(loss_bbox=losses, bbox_targets=cls_reg_targets) - - def loss(self, - cls_score: Tensor, - bbox_pred: Tensor, - rois: Tensor, - labels: Tensor, - label_weights: Tensor, - bbox_targets: Tensor, - bbox_weights: Tensor, - reduction_override: Optional[str] = None) -> dict: - """Calculate the loss based on the network predictions and targets. - - Args: - cls_score (Tensor): Classification prediction - results of all class, has shape - (batch_size * num_proposals_single_image, num_classes) - bbox_pred (Tensor): Regression prediction results, - has shape - (batch_size * num_proposals_single_image, 4), the last - dimension 4 represents [tl_x, tl_y, br_x, br_y]. - rois (Tensor): RoIs with the shape - (batch_size * num_proposals_single_image, 5) where the first - column indicates batch id of each RoI. - labels (Tensor): Gt_labels for all proposals in a batch, has - shape (batch_size * num_proposals_single_image, ). - label_weights (Tensor): Labels_weights for all proposals in a - batch, has shape (batch_size * num_proposals_single_image, ). 
- bbox_targets (Tensor): Regression target for all proposals in a - batch, has shape (batch_size * num_proposals_single_image, 4), - the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (Tensor): Regression weights for all proposals in a - batch, has shape (batch_size * num_proposals_single_image, 4). - reduction_override (str, optional): The reduction - method used to override the original reduction - method of the loss. Options are "none", - "mean" and "sum". Defaults to None, - - Returns: - dict: A dictionary of loss. - """ - - losses = dict() - - if cls_score is not None: - avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.) - if cls_score.numel() > 0: - loss_cls_ = self.loss_cls( - cls_score, - labels, - label_weights, - avg_factor=avg_factor, - reduction_override=reduction_override) - if isinstance(loss_cls_, dict): - losses.update(loss_cls_) - else: - losses['loss_cls'] = loss_cls_ - if self.custom_activation: - acc_ = self.loss_cls.get_accuracy(cls_score, labels) - losses.update(acc_) - else: - losses['acc'] = accuracy(cls_score, labels) - if bbox_pred is not None: - bg_class_ind = self.num_classes - # 0~self.num_classes-1 are FG, self.num_classes is BG - pos_inds = (labels >= 0) & (labels < bg_class_ind) - # do not perform bounding box regression for BG anymore. - if pos_inds.any(): - if self.reg_decoded_bbox: - # When the regression loss (e.g. `IouLoss`, - # `GIouLoss`, `DIouLoss`) is applied directly on - # the decoded bounding boxes, it decodes the - # already encoded coordinates to absolute format. - bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred) - bbox_pred = get_box_tensor(bbox_pred) - if self.reg_class_agnostic: - pos_bbox_pred = bbox_pred.view( - bbox_pred.size(0), -1)[pos_inds.type(torch.bool)] - else: - pos_bbox_pred = bbox_pred.view( - bbox_pred.size(0), self.num_classes, - -1)[pos_inds.type(torch.bool), - labels[pos_inds.type(torch.bool)]] - losses['loss_bbox'] = self.loss_bbox( - pos_bbox_pred, - bbox_targets[pos_inds.type(torch.bool)], - bbox_weights[pos_inds.type(torch.bool)], - avg_factor=bbox_targets.size(0), - reduction_override=reduction_override) - else: - losses['loss_bbox'] = bbox_pred[pos_inds].sum() - - return losses - - def predict_by_feat(self, - rois: Tuple[Tensor], - cls_scores: Tuple[Tensor], - bbox_preds: Tuple[Tensor], - batch_img_metas: List[dict], - rcnn_test_cfg: Optional[ConfigDict] = None, - rescale: bool = False) -> InstanceList: - """Transform a batch of output features extracted from the head into - bbox results. - - Args: - rois (tuple[Tensor]): Tuple of boxes to be transformed. - Each has shape (num_boxes, 5). last dimension 5 arrange as - (batch_index, x1, y1, x2, y2). - cls_scores (tuple[Tensor]): Tuple of box scores, each has shape - (num_boxes, num_classes + 1). - bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each - has shape (num_boxes, num_classes * 4). - batch_img_metas (list[dict]): List of image information. - rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN. - Defaults to None. - rescale (bool): If True, return boxes in original image space. - Defaults to False. - - Returns: - list[:obj:`InstanceData`]: Instance segmentation - results of each image after the post process. - Each item usually contains following keys. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). 
- - bboxes (Tensor): Has a shape (num_instances, 4), - the last dimension 4 arrange as (x1, y1, x2, y2). - """ - assert len(cls_scores) == len(bbox_preds) - result_list = [] - for img_id in range(len(batch_img_metas)): - img_meta = batch_img_metas[img_id] - results = self._predict_by_feat_single( - roi=rois[img_id], - cls_score=cls_scores[img_id], - bbox_pred=bbox_preds[img_id], - img_meta=img_meta, - rescale=rescale, - rcnn_test_cfg=rcnn_test_cfg) - result_list.append(results) - - return result_list - - def _predict_by_feat_single( - self, - roi: Tensor, - cls_score: Tensor, - bbox_pred: Tensor, - img_meta: dict, - rescale: bool = False, - rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: - """Transform a single image's features extracted from the head into - bbox results. - - Args: - roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). - last dimension 5 arrange as (batch_index, x1, y1, x2, y2). - cls_score (Tensor): Box scores, has shape - (num_boxes, num_classes + 1). - bbox_pred (Tensor): Box energies / deltas. - has shape (num_boxes, num_classes * 4). - img_meta (dict): image information. - rescale (bool): If True, return boxes in original image space. - Defaults to False. - rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. - Defaults to None - - Returns: - :obj:`InstanceData`: Detection results of each image\ - Each item usually contains following keys. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - - bboxes (Tensor): Has a shape (num_instances, 4), - the last dimension 4 arrange as (x1, y1, x2, y2). - """ - results = InstanceData() - if roi.shape[0] == 0: - return empty_instances([img_meta], - roi.device, - task_type='bbox', - instance_results=[results], - box_type=self.predict_box_type, - use_box_type=False, - num_classes=self.num_classes, - score_per_cls=rcnn_test_cfg is None)[0] - - # some loss (Seesaw loss..) may have custom activation - if self.custom_cls_channels: - scores = self.loss_cls.get_activation(cls_score) - else: - scores = F.softmax( - cls_score, dim=-1) if cls_score is not None else None - - img_shape = img_meta['img_shape'] - num_rois = roi.size(0) - # bbox_pred would be None in some detector when with_reg is False, - # e.g. Grid R-CNN. - if bbox_pred is not None: - num_classes = 1 if self.reg_class_agnostic else self.num_classes - roi = roi.repeat_interleave(num_classes, dim=0) - bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size) - bboxes = self.bbox_coder.decode( - roi[..., 1:], bbox_pred, max_shape=img_shape) - else: - bboxes = roi[:, 1:].clone() - if img_shape is not None and bboxes.size(-1) == 4: - bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1]) - bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0]) - - if rescale and bboxes.size(0) > 0: - assert img_meta.get('scale_factor') is not None - scale_factor = [1 / s for s in img_meta['scale_factor']] - bboxes = scale_boxes(bboxes, scale_factor) - - # Get the inside tensor when `bboxes` is a box type - bboxes = get_box_tensor(bboxes) - box_dim = bboxes.size(-1) - bboxes = bboxes.view(num_rois, -1) - - if rcnn_test_cfg is None: - # This means that it is aug test. - # It needs to return the raw results without nms. 
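            # The decoded boxes and the full per-class score matrix are kept as-is, so
            # the caller can merge predictions from several augmented views first and
            # run NMS once on the merged set.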
- results.bboxes = bboxes - results.scores = scores - else: - det_bboxes, det_labels = multiclass_nms( - bboxes, - scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img, - box_dim=box_dim) - results.bboxes = det_bboxes[:, :-1] - results.scores = det_bboxes[:, -1] - results.labels = det_labels - return results - - def refine_bboxes(self, sampling_results: Union[List[SamplingResult], - InstanceList], - bbox_results: dict, - batch_img_metas: List[dict]) -> InstanceList: - """Refine bboxes during training. - - Args: - sampling_results (List[:obj:`SamplingResult`] or - List[:obj:`InstanceData`]): Sampling results. - :obj:`SamplingResult` is the real sampling results - calculate from bbox_head, while :obj:`InstanceData` is - fake sampling results, e.g., in Sparse R-CNN or QueryInst, etc. - bbox_results (dict): Usually is a dictionary with keys: - - - `cls_score` (Tensor): Classification scores. - - `bbox_pred` (Tensor): Box energies / deltas. - - `rois` (Tensor): RoIs with the shape (n, 5) where the first - column indicates batch id of each RoI. - - `bbox_targets` (tuple): Ground truth for proposals in a - single image. Containing the following list of Tensors: - (labels, label_weights, bbox_targets, bbox_weights) - batch_img_metas (List[dict]): List of image information. - - Returns: - list[:obj:`InstanceData`]: Refined bboxes of each image. - - Example: - >>> # xdoctest: +REQUIRES(module:kwarray) - >>> import numpy as np - >>> from mmdet.models.task_modules.samplers. - ... sampling_result import random_boxes - >>> from mmdet.models.task_modules.samplers import SamplingResult - >>> self = BBoxHead(reg_class_agnostic=True) - >>> n_roi = 2 - >>> n_img = 4 - >>> scale = 512 - >>> rng = np.random.RandomState(0) - ... batch_img_metas = [{'img_shape': (scale, scale)} - >>> for _ in range(n_img)] - >>> sampling_results = [SamplingResult.random(rng=10) - ... for _ in range(n_img)] - >>> # Create rois in the expected format - >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng) - >>> img_ids = torch.randint(0, n_img, (n_roi,)) - >>> img_ids = img_ids.float() - >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1) - >>> # Create other args - >>> labels = torch.randint(0, 81, (scale,)).long() - >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng) - >>> cls_score = torch.randn((scale, 81)) - ... # For each image, pretend random positive boxes are gts - >>> bbox_targets = (labels, None, None, None) - ... bbox_results = dict(rois=rois, bbox_pred=bbox_preds, - ... cls_score=cls_score, - ... bbox_targets=bbox_targets) - >>> bboxes_list = self.refine_bboxes(sampling_results, - ... bbox_results, - ... 
batch_img_metas) - >>> print(bboxes_list) - """ - pos_is_gts = [res.pos_is_gt for res in sampling_results] - # bbox_targets is a tuple - labels = bbox_results['bbox_targets'][0] - cls_scores = bbox_results['cls_score'] - rois = bbox_results['rois'] - bbox_preds = bbox_results['bbox_pred'] - if self.custom_activation: - # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead - cls_scores = self.loss_cls.get_activation(cls_scores) - if cls_scores.numel() == 0: - return None - if cls_scores.shape[-1] == self.num_classes + 1: - # remove background class - cls_scores = cls_scores[:, :-1] - elif cls_scores.shape[-1] != self.num_classes: - raise ValueError('The last dim of `cls_scores` should equal to ' - '`num_classes` or `num_classes + 1`,' - f'but got {cls_scores.shape[-1]}.') - labels = torch.where(labels == self.num_classes, cls_scores.argmax(1), - labels) - - img_ids = rois[:, 0].long().unique(sorted=True) - assert img_ids.numel() <= len(batch_img_metas) - - results_list = [] - for i in range(len(batch_img_metas)): - inds = torch.nonzero( - rois[:, 0] == i, as_tuple=False).squeeze(dim=1) - num_rois = inds.numel() - - bboxes_ = rois[inds, 1:] - label_ = labels[inds] - bbox_pred_ = bbox_preds[inds] - img_meta_ = batch_img_metas[i] - pos_is_gts_ = pos_is_gts[i] - - bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_, - img_meta_) - # filter gt bboxes - pos_keep = 1 - pos_is_gts_ - keep_inds = pos_is_gts_.new_ones(num_rois) - keep_inds[:len(pos_is_gts_)] = pos_keep - results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)]) - results_list.append(results) - - return results_list - - def regress_by_class(self, priors: Tensor, label: Tensor, - bbox_pred: Tensor, img_meta: dict) -> Tensor: - """Regress the bbox for the predicted class. Used in Cascade R-CNN. - - Args: - priors (Tensor): Priors from `rpn_head` or last stage - `bbox_head`, has shape (num_proposals, 4). - label (Tensor): Only used when `self.reg_class_agnostic` - is False, has shape (num_proposals, ). - bbox_pred (Tensor): Regression prediction of - current stage `bbox_head`. When `self.reg_class_agnostic` - is False, it has shape (n, num_classes * 4), otherwise - it has shape (n, 4). - img_meta (dict): Image meta info. - - Returns: - Tensor: Regressed bboxes, the same shape as input rois. - """ - reg_dim = self.bbox_coder.encode_size - if not self.reg_class_agnostic: - label = label * reg_dim - inds = torch.stack([label + i for i in range(reg_dim)], 1) - bbox_pred = torch.gather(bbox_pred, 1, inds) - assert bbox_pred.size()[1] == reg_dim - - max_shape = img_meta['img_shape'] - regressed_bboxes = self.bbox_coder.decode( - priors, bbox_pred, max_shape=max_shape) - return regressed_bboxes diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/ema_hook.py b/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/ema_hook.py deleted file mode 100644 index 284d211b628c411f0eb712d1c558dc6aa2eb8996..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/ema_hook.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy -import itertools -import warnings -from typing import Dict, Optional - -from mmengine.hooks import EMAHook as BaseEMAHook -from mmengine.logging import MMLogger -from mmengine.runner import Runner - -from mmpretrain.registry import HOOKS - - -@HOOKS.register_module() -class EMAHook(BaseEMAHook): - """A Hook to apply Exponential Moving Average (EMA) on the model during - training. 
- - Comparing with :class:`mmengine.hooks.EMAHook`, this hook accepts - ``evaluate_on_ema`` and ``evaluate_on_origin`` arguments. By default, the - ``evaluate_on_ema`` is enabled, and if you want to do validation and - testing on both original and EMA models, please set both arguments - ``True``. - - Note: - - EMAHook takes priority over CheckpointHook. - - The original model parameters are actually saved in ema field after - train. - - ``begin_iter`` and ``begin_epoch`` cannot be set at the same time. - - Args: - ema_type (str): The type of EMA strategy to use. You can find the - supported strategies in :mod:`mmengine.model.averaged_model`. - Defaults to 'ExponentialMovingAverage'. - strict_load (bool): Whether to strictly enforce that the keys of - ``state_dict`` in checkpoint match the keys returned by - ``self.module.state_dict``. Defaults to False. - Changed in v0.3.0. - begin_iter (int): The number of iteration to enable ``EMAHook``. - Defaults to 0. - begin_epoch (int): The number of epoch to enable ``EMAHook``. - Defaults to 0. - evaluate_on_ema (bool): Whether to evaluate (validate and test) - on EMA model during val-loop and test-loop. Defaults to True. - evaluate_on_origin (bool): Whether to evaluate (validate and test) - on the original model during val-loop and test-loop. - Defaults to False. - **kwargs: Keyword arguments passed to subclasses of - :obj:`BaseAveragedModel` - """ - - priority = 'NORMAL' - - def __init__(self, - ema_type: str = 'ExponentialMovingAverage', - strict_load: bool = False, - begin_iter: int = 0, - begin_epoch: int = 0, - evaluate_on_ema: bool = True, - evaluate_on_origin: bool = False, - **kwargs): - super().__init__( - ema_type=ema_type, - strict_load=strict_load, - begin_iter=begin_iter, - begin_epoch=begin_epoch, - **kwargs) - - if not evaluate_on_ema and not evaluate_on_origin: - warnings.warn( - 'Automatically set `evaluate_on_origin=True` since the ' - '`evaluate_on_ema` is disabled. If you want to disable ' - 'all validation, please modify the `val_interval` of ' - 'the `train_cfg`.', UserWarning) - evaluate_on_origin = True - - self.evaluate_on_ema = evaluate_on_ema - self.evaluate_on_origin = evaluate_on_origin - self.load_ema_from_ckpt = False - - def before_train(self, runner) -> None: - super().before_train(runner) - if not runner._resume and self.load_ema_from_ckpt: - # If loaded EMA state dict but not want to resume training - # overwrite the EMA state dict with the source model. - MMLogger.get_current_instance().info( - 'Load from a checkpoint with EMA parameters but not ' - 'resume training. Initialize the model parameters with ' - 'EMA parameters') - for p_ema, p_src in zip(self._ema_params, self._src_params): - p_src.data.copy_(p_ema.data) - - def before_val_epoch(self, runner) -> None: - """We load parameter values from ema model to source model before - validation. - - Args: - runner (Runner): The runner of the training process. - """ - if self.evaluate_on_ema: - # Swap when evaluate on ema - self._swap_ema_parameters() - - def after_val_epoch(self, - runner, - metrics: Optional[Dict[str, float]] = None) -> None: - """We recover source model's parameter from ema model after validation. - - Args: - runner (Runner): The runner of the validation process. - metrics (Dict[str, float], optional): Evaluation results of all - metrics on validation dataset. The keys are the names of the - metrics, and the values are corresponding results. 
- """ - if self.evaluate_on_ema: - # Swap when evaluate on ema - self._swap_ema_parameters() - - if self.evaluate_on_ema and self.evaluate_on_origin: - # Re-evaluate if evaluate on both ema and origin. - val_loop = runner.val_loop - - runner.model.eval() - for idx, data_batch in enumerate(val_loop.dataloader): - val_loop.run_iter(idx, data_batch) - - # compute metrics - origin_metrics = val_loop.evaluator.evaluate( - len(val_loop.dataloader.dataset)) - - for k, v in origin_metrics.items(): - runner.message_hub.update_scalar(f'val/{k}_origin', v) - - def before_test_epoch(self, runner) -> None: - """We load parameter values from ema model to source model before test. - - Args: - runner (Runner): The runner of the training process. - """ - if self.evaluate_on_ema: - # Swap when evaluate on ema - self._swap_ema_parameters() - MMLogger.get_current_instance().info('Start testing on EMA model.') - else: - MMLogger.get_current_instance().info( - 'Start testing on the original model.') - - def after_test_epoch(self, - runner: Runner, - metrics: Optional[Dict[str, float]] = None) -> None: - """We recover source model's parameter from ema model after test. - - Args: - runner (Runner): The runner of the testing process. - metrics (Dict[str, float], optional): Evaluation results of all - metrics on test dataset. The keys are the names of the - metrics, and the values are corresponding results. - """ - if self.evaluate_on_ema: - # Swap when evaluate on ema - self._swap_ema_parameters() - - if self.evaluate_on_ema and self.evaluate_on_origin: - # Re-evaluate if evaluate on both ema and origin. - MMLogger.get_current_instance().info( - 'Start testing on the original model.') - test_loop = runner.test_loop - - runner.model.eval() - for idx, data_batch in enumerate(test_loop.dataloader): - test_loop.run_iter(idx, data_batch) - - # compute metrics - origin_metrics = test_loop.evaluator.evaluate( - len(test_loop.dataloader.dataset)) - - for k, v in origin_metrics.items(): - runner.message_hub.update_scalar(f'test/{k}_origin', v) - - def after_load_checkpoint(self, runner, checkpoint: dict) -> None: - """Resume ema parameters from checkpoint. - - Args: - runner (Runner): The runner of the testing process. - """ - from mmengine.runner.checkpoint import load_state_dict - if 'ema_state_dict' in checkpoint: - # The original model parameters are actually saved in ema - # field swap the weights back to resume ema state. - self._swap_ema_state_dict(checkpoint) - self.ema_model.load_state_dict( - checkpoint['ema_state_dict'], strict=self.strict_load) - self.load_ema_from_ckpt = True - - # Support load checkpoint without ema state dict. 
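        # i.e. checkpoints produced without this hook only carry 'state_dict'; in that
        # case the EMA copy is initialised directly from the plain model weights.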
- else: - load_state_dict( - self.ema_model.module, - copy.deepcopy(checkpoint['state_dict']), - strict=self.strict_load) - - @property - def _src_params(self): - if self.ema_model.update_buffers: - return itertools.chain(self.src_model.parameters(), - self.src_model.buffers()) - else: - return self.src_model.parameters() - - @property - def _ema_params(self): - if self.ema_model.update_buffers: - return itertools.chain(self.ema_model.module.parameters(), - self.ema_model.module.buffers()) - else: - return self.ema_model.module.parameters() diff --git a/spaces/LearnableAI/FinTextSummaryDemo/model.py b/spaces/LearnableAI/FinTextSummaryDemo/model.py deleted file mode 100644 index 485b6fc3ae9b9ed63344352c61803cacb3f395b6..0000000000000000000000000000000000000000 --- a/spaces/LearnableAI/FinTextSummaryDemo/model.py +++ /dev/null @@ -1,70 +0,0 @@ -""" - 文件说明: - GPT2模型文件,主要对transformers包中GPT2LMHeadModel的重写,修改计算loss部分,只计算预测title部分的loss -""" - -from torch.nn import CrossEntropyLoss -import torch.nn as nn -from transformers.models.gpt2.modeling_gpt2 import GPT2PreTrainedModel, GPT2Model - - -class GPT2LMHeadModel(GPT2PreTrainedModel): - """GPT2模型""" - def __init__(self, config): - """ - 初始化函数 - Args: - config: 配置参数 - """ - super().__init__(config) - self.transformer = GPT2Model(config) - self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) - self.init_weights() - - def forward(self, input_ids=None, past=None, token_type_ids=None, labels=None, title_id=None): - """ - 前向函数,计算GPT2预测结果值 - Args: - input_ids: 输入序列在词表中的索引序列,size:[batch_size, sequence_length] - past: 包含由模型预先计算好的隐藏状态,一般使用在预测阶段,用于加速顺序解码,防止重复计算前面计算过的token - token_type_ids: 用于区分输入序列中content和title的分隔符序列,size:[batch_size, sequence_length] - labels: 标签序列,size:[batch_size, sequence_length],一般情况下,与input_ids相同 - title_id: title部分分隔符的id - Returns: - - """ - # 获取GPT2模型的输出结果 - transformer_outputs = self.transformer(input_ids, past_key_values=past, token_type_ids=token_type_ids) - # 获取GPT2模型的最后一层的隐层节点状态,size:[batch_size, sequence_length, config.n_embd] - hidden_states = transformer_outputs[0] - # 预测隐层节点状态中的每一个token的下一个token,size:[batch_size, sequence_length, config.vocab_size] - lm_logits = self.lm_head(hidden_states) - # 拼接输出结果 - outputs = (lm_logits,) + transformer_outputs[1:] - # 如果labels不为None时,计算损失值loss,并拼接到输出结果中 - if labels is not None: - # 计算loss时,title_id不可以为None,因为需要title_id找到title的部分 - if title_id is None or token_type_ids is None: - raise Exception("当labels不为None时, title_id和token_type_ids均不可以为None。") - # 获取mask值,如果token_type_ids中等于title_id的部分需要计算loss,标记为1;否则为0。 - # size:[batch_size, sequence_length] - mask = (token_type_ids == title_id).long() - # 获取新的标签,size:[batch_size, sequence_length] - labels = labels * mask - # 对预测结果和标签进行偏移操作 - # GPT2的生成机制为通过前面的token,预测下一个token;并且labels与input_ids相同, - # 因此input_ids中的第一个token的预测结果,实际上是标签中的第二个token,以此类推,最终仅计算sequence_length-1个token的loss - shift_logits = lm_logits[..., :-1, :].contiguous() - shift_labels = labels[..., 1:].contiguous() - - # 定义损失函数CrossEntropyLoss,并且设置忽略计算loss的索引,以及返回loss的形式 - # 忽略shift_labels中为0的loss,也就是仅计算title部分的损失值 - # 对loss的计算方式设为sum,由于我们仅计算了itle部分的损失值,如果使用mean,会使loss变小(实际除的是sequence_length-1,不是title部分的真实长度) - loss_fct = CrossEntropyLoss(ignore_index=0, reduction="sum") - loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) - # 获取title部分的真实长度,并计算真实loss - num = shift_labels.ne(0).long().sum().item() - loss = loss / num - outputs = (loss,) + outputs - return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions) - diff 
--git a/spaces/LightChen2333/OpenSLU/model/decoder/interaction/gl_gin_interaction.py b/spaces/LightChen2333/OpenSLU/model/decoder/interaction/gl_gin_interaction.py deleted file mode 100644 index d6e697866719d3e71fca9a1ec91cd95dce81b9c1..0000000000000000000000000000000000000000 --- a/spaces/LightChen2333/OpenSLU/model/decoder/interaction/gl_gin_interaction.py +++ /dev/null @@ -1,227 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence - -from common.utils import HiddenData, ClassifierOutputData -from model.decoder.interaction import BaseInteraction - - -class LSTMEncoder(nn.Module): - """ - Encoder structure based on bidirectional LSTM. - """ - - def __init__(self, embedding_dim, hidden_dim, dropout_rate): - super(LSTMEncoder, self).__init__() - - # Parameter recording. - self.__embedding_dim = embedding_dim - self.__hidden_dim = hidden_dim // 2 - self.__dropout_rate = dropout_rate - - # Network attributes. - self.__dropout_layer = nn.Dropout(self.__dropout_rate) - self.__lstm_layer = nn.LSTM( - input_size=self.__embedding_dim, - hidden_size=self.__hidden_dim, - batch_first=True, - bidirectional=True, - dropout=self.__dropout_rate, - num_layers=1 - ) - - def forward(self, embedded_text, seq_lens): - """ Forward process for LSTM Encoder. - - (batch_size, max_sent_len) - -> (batch_size, max_sent_len, word_dim) - -> (batch_size, max_sent_len, hidden_dim) - - :param embedded_text: padded and embedded input text. - :param seq_lens: is the length of original input text. - :return: is encoded word hidden vectors. - """ - - # Padded_text should be instance of LongTensor. - dropout_text = self.__dropout_layer(embedded_text) - - # Pack and Pad process for input of variable length. 
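        # pack_padded_sequence requires the lengths on CPU; enforce_sorted=False keeps
        # the batch in its original order, and pad_packed_sequence below restores a
        # dense (batch_size, max_sent_len, hidden_dim) tensor of bidirectional states.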
- packed_text = pack_padded_sequence(dropout_text, seq_lens.cpu(), batch_first=True, enforce_sorted=False) - lstm_hiddens, (h_last, c_last) = self.__lstm_layer(packed_text) - padded_hiddens, _ = pad_packed_sequence(lstm_hiddens, batch_first=True) - - return padded_hiddens - - -class GraphAttentionLayer(nn.Module): - """ - Simple GAT layer, similar to https://arxiv.org/abs/1710.10903 - """ - - def __init__(self, in_features, out_features, dropout, alpha, concat=True): - super(GraphAttentionLayer, self).__init__() - self.dropout = dropout - self.in_features = in_features - self.out_features = out_features - self.alpha = alpha - self.concat = concat - - self.W = nn.Parameter(torch.zeros(size=(in_features, out_features))) - nn.init.xavier_uniform_(self.W.data, gain=1.414) - self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1))) - nn.init.xavier_uniform_(self.a.data, gain=1.414) - - self.leakyrelu = nn.LeakyReLU(self.alpha) - - def forward(self, input, adj): - h = torch.matmul(input, self.W) - B, N = h.size()[0], h.size()[1] - - a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1), h.repeat(1, N, 1)], dim=2).view(B, N, -1, - 2 * self.out_features) - e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3)) - - zero_vec = -9e15 * torch.ones_like(e) - attention = torch.where(adj > 0, e, zero_vec) - attention = F.softmax(attention, dim=2) - attention = F.dropout(attention, self.dropout, training=self.training) - h_prime = torch.matmul(attention, h) - - if self.concat: - return F.elu(h_prime) - else: - return h_prime - - -class GAT(nn.Module): - def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, nlayers=2): - """Dense version of GAT.""" - super(GAT, self).__init__() - self.dropout = dropout - self.nlayers = nlayers - self.nheads = nheads - self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in - range(nheads)] - for i, attention in enumerate(self.attentions): - self.add_module('attention_{}'.format(i), attention) - if self.nlayers > 2: - for i in range(self.nlayers - 2): - for j in range(self.nheads): - self.add_module('attention_{}_{}'.format(i + 1, j), - GraphAttentionLayer(nhid * nheads, nhid, dropout=dropout, alpha=alpha, concat=True)) - - self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False) - - def forward(self, x, adj): - x = F.dropout(x, self.dropout, training=self.training) - input = x - x = torch.cat([att(x, adj) for att in self.attentions], dim=2) - if self.nlayers > 2: - for i in range(self.nlayers - 2): - temp = [] - x = F.dropout(x, self.dropout, training=self.training) - cur_input = x - for j in range(self.nheads): - temp.append(self.__getattr__('attention_{}_{}'.format(i + 1, j))(x, adj)) - x = torch.cat(temp, dim=2) + cur_input - x = F.dropout(x, self.dropout, training=self.training) - x = F.elu(self.out_att(x, adj)) - return x + input - - -def normalize_adj(mx): - """ - Row-normalize matrix D^{-1}A - torch.diag_embed: https://github.com/pytorch/pytorch/pull/12447 - """ - mx = mx.float() - rowsum = mx.sum(2) - r_inv = torch.pow(rowsum, -1) - r_inv[torch.isinf(r_inv)] = 0. 
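    # Rows with zero degree give inf after pow(-1); zeroing them leaves isolated
    # nodes as all-zero rows. diag_embed then builds the batched D^{-1}, so the
    # matmul below performs the row normalization D^{-1}A announced in the docstring.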
- r_mat_inv = torch.diag_embed(r_inv, 0) - mx = r_mat_inv.matmul(mx) - return mx - - -class GLGINInteraction(BaseInteraction): - def __init__(self, **config): - super().__init__(**config) - self.intent_embedding = nn.Parameter( - torch.FloatTensor(self.config["intent_label_num"], self.config["intent_embedding_dim"])) # 191, 32 - nn.init.normal_(self.intent_embedding.data) - self.adj = None - self.__slot_lstm = LSTMEncoder( - self.config["input_dim"] + self.config["intent_label_num"], - config["output_dim"], - config["dropout_rate"] - ) - self.__slot_graph = GAT( - config["output_dim"], - config["hidden_dim"], - config["output_dim"], - config["dropout_rate"], - config["alpha"], - config["num_heads"], - config["num_layers"]) - - self.__global_graph = GAT( - config["output_dim"], - config["hidden_dim"], - config["output_dim"], - config["dropout_rate"], - config["alpha"], - config["num_heads"], - config["num_layers"]) - - def generate_global_adj_gat(self, seq_len, index, batch, window): - global_intent_idx = [[] for i in range(batch)] - global_slot_idx = [[] for i in range(batch)] - for item in index: - global_intent_idx[item[0]].append(item[1]) - - for i, len in enumerate(seq_len): - global_slot_idx[i].extend( - list(range(self.config["intent_label_num"], self.config["intent_label_num"] + len))) - - adj = torch.cat([torch.eye(self.config["intent_label_num"] + max(seq_len)).unsqueeze(0) for i in range(batch)]) - for i in range(batch): - for j in global_intent_idx[i]: - adj[i, j, global_slot_idx[i]] = 1. - adj[i, j, global_intent_idx[i]] = 1. - for j in global_slot_idx[i]: - adj[i, j, global_intent_idx[i]] = 1. - - for i in range(batch): - for j in range(self.config["intent_label_num"], self.config["intent_label_num"] + seq_len[i]): - adj[i, j, max(self.config["intent_label_num"], j - window):min(seq_len[i] + self.config["intent_label_num"], j + window + 1)] = 1. - - if self.config["row_normalized"]: - adj = normalize_adj(adj) - adj = adj.to(self.intent_embedding.device) - return adj - - def generate_slot_adj_gat(self, seq_len, batch, window): - slot_idx_ = [[] for i in range(batch)] - adj = torch.cat([torch.eye(max(seq_len)).unsqueeze(0) for i in range(batch)]) - for i in range(batch): - for j in range(seq_len[i]): - adj[i, j, max(0, j - window):min(seq_len[i], j + window + 1)] = 1. 
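        # Local slot graph: every token is linked to itself (identity above) and to its
        # neighbours within a +/-window span; the global graph built in
        # generate_global_adj_gat additionally connects each predicted intent node to
        # all slot nodes and to the other predicted intents of the same utterance.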
- if self.config["row_normalized"]: - adj = normalize_adj(adj) - adj = adj.to(self.intent_embedding.device) - return adj - - def forward(self, encode_hidden: HiddenData, pred_intent: ClassifierOutputData = None, intent_index=None): - seq_lens = encode_hidden.inputs.attention_mask.sum(-1) - slot_lstm_out = self.__slot_lstm(torch.cat([encode_hidden.slot_hidden, pred_intent.classifier_output], dim=-1), - seq_lens) - global_adj = self.generate_global_adj_gat(seq_lens, intent_index, len(seq_lens), - self.config["slot_graph_window"]) - slot_adj = self.generate_slot_adj_gat(seq_lens, len(seq_lens), self.config["slot_graph_window"]) - batch = len(seq_lens) - slot_graph_out = self.__slot_graph(slot_lstm_out, slot_adj) - intent_in = self.intent_embedding.unsqueeze(0).repeat(batch, 1, 1) - global_graph_in = torch.cat([intent_in, slot_graph_out], dim=1) - encode_hidden.update_slot_hidden_state(self.__global_graph(global_graph_in, global_adj)) - return encode_hidden diff --git "a/spaces/Liu-LAB/GPT-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" "b/spaces/Liu-LAB/GPT-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" deleted file mode 100644 index 554c485aa0891f74c57cacfcbe076febe7a11029..0000000000000000000000000000000000000000 --- "a/spaces/Liu-LAB/GPT-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" +++ /dev/null @@ -1,175 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -fast_debug = False - -class PaperFileGroup(): - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - 将长文本分离开来 - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex") - - print('Segmentation: done') - -def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'): - import time, os, re - from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - - # <-------- 读取Latex文件,删除其中的所有注释 ----------> - pfg = PaperFileGroup() - - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - # 定义注释的正则表达式 - comment_pattern = r'(? 
- pfg.run_file_split(max_token_limit=1024) - n_split = len(pfg.sp_file_contents) - - # <-------- 抽取摘要 ----------> - # if language == 'en': - # abs_extract_inputs = f"Please write an abstract for this paper" - - # # 单线,获取文章meta信息 - # paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive( - # inputs=abs_extract_inputs, - # inputs_show_user=f"正在抽取摘要信息。", - # llm_kwargs=llm_kwargs, - # chatbot=chatbot, history=[], - # sys_prompt="Your job is to collect information from materials。", - # ) - - # <-------- 多线程润色开始 ----------> - if language == 'en->zh': - inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - elif language == 'zh->en': - inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # OpenAI所允许的最大并行过载 - scroller_max_len = 80 - ) - - # <-------- 整理结果,退出 ----------> - create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md" - res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name) - history = gpt_response_collection - chatbot.append((f"{fp}完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - - - - -@CatchException -def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') - - - - - -@CatchException -def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Latex项目进行翻译。函数插件贡献者: 
Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - import glob, os - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') \ No newline at end of file diff --git "a/spaces/Liu-LAB/GPT-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" "b/spaces/Liu-LAB/GPT-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" deleted file mode 100644 index 8af98640bf51a4edf19254c05d68fad282d02776..0000000000000000000000000000000000000000 --- "a/spaces/Liu-LAB/GPT-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" +++ /dev/null @@ -1,261 +0,0 @@ -import glob, time, os, re -from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion -from toolbox import CatchException, report_execption, write_history_to_file -from toolbox import promote_file_to_downloadzone, get_log_folder -fast_debug = False - -class PaperFileGroup(): - def __init__(self): - self.file_paths = [] - self.file_contents = [] - self.sp_file_contents = [] - self.sp_file_index = [] - self.sp_file_tag = [] - - # count_token - from request_llm.bridge_all import model_info - enc = model_info["gpt-3.5-turbo"]['tokenizer'] - def get_token_num(txt): return len(enc.encode(txt, disallowed_special=())) - self.get_token_num = get_token_num - - def run_file_split(self, max_token_limit=1900): - """ - 将长文本分离开来 - """ - for index, file_content in enumerate(self.file_contents): - if self.get_token_num(file_content) < max_token_limit: - self.sp_file_contents.append(file_content) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index]) - else: - from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf - segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit) - for j, segment in enumerate(segments): - self.sp_file_contents.append(segment) - self.sp_file_index.append(index) - self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md") - print('Segmentation: done') - - def merge_result(self): - self.file_result = ["" for _ in range(len(self.file_paths))] - for r, k in zip(self.sp_file_result, self.sp_file_index): - self.file_result[k] += r - - def write_result(self, language): - manifest = [] - for path, res in zip(self.file_paths, self.file_result): - dst_file = os.path.join(get_log_folder(), f'{gen_time_str()}.md') - with open(dst_file, 'w', encoding='utf8') as f: - manifest.append(dst_file) - f.write(res) - return manifest - -def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'): 
- from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency - - # <-------- 读取Markdown文件,删除其中的所有注释 ----------> - pfg = PaperFileGroup() - - for index, fp in enumerate(file_manifest): - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - # 记录删除注释后的文本 - pfg.file_paths.append(fp) - pfg.file_contents.append(file_content) - - # <-------- 拆分过长的Markdown文件 ----------> - pfg.run_file_split(max_token_limit=1500) - n_split = len(pfg.sp_file_contents) - - # <-------- 多线程翻译开始 ----------> - if language == 'en->zh': - inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - elif language == 'zh->en': - inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - else: - inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" + - f"\n\n{frag}" for frag in pfg.sp_file_contents] - inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag] - sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)] - - gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency( - inputs_array=inputs_array, - inputs_show_user_array=inputs_show_user_array, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history_array=[[""] for _ in range(n_split)], - sys_prompt_array=sys_prompt_array, - # max_workers=5, # OpenAI所允许的最大并行过载 - scroller_max_len = 80 - ) - try: - pfg.sp_file_result = [] - for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]): - pfg.sp_file_result.append(gpt_say) - pfg.merge_result() - pfg.write_result(language) - except: - print(trimmed_format_exc()) - - # <-------- 整理结果,退出 ----------> - create_report_file_name = gen_time_str() + f"-chatgpt.md" - res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name) - promote_file_to_downloadzone(res, chatbot=chatbot) - history = gpt_response_collection - chatbot.append((f"{fp}完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - -def get_files_from_everything(txt, preference=''): - if txt == "": return False, None, None - success = True - if txt.startswith('http'): - import requests - from toolbox import get_conf - proxies, = get_conf('proxies') - # 网络的远程文件 - if preference == 'Github': - print('正在从github下载资源 ...') - if not txt.endswith('.md'): - # Make a request to the GitHub API to retrieve the repository information - url = txt.replace("https://github.com/", "https://api.github.com/repos/") + '/readme' - response = requests.get(url, proxies=proxies) - txt = response.json()['download_url'] - else: - txt = txt.replace("https://github.com/", "https://raw.githubusercontent.com/") - txt = txt.replace("/blob/", "/") - - r = requests.get(txt, proxies=proxies) - download_local = f'{get_log_folder(plugin_name="批量Markdown翻译")}/raw-readme-{gen_time_str()}.md' - project_folder = 
f'{get_log_folder(plugin_name="批量Markdown翻译")}' - with open(download_local, 'wb+') as f: f.write(r.content) - file_manifest = [download_local] - elif txt.endswith('.md'): - # 直接给定文件 - file_manifest = [txt] - project_folder = os.path.dirname(txt) - elif os.path.exists(txt): - # 本地路径,递归搜索 - project_folder = txt - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)] - else: - project_folder = None - file_manifest = [] - success = False - - return success, file_manifest, project_folder - - -@CatchException -def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - disable_auto_promotion(chatbot) - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - - success, file_manifest, project_folder = get_files_from_everything(txt, preference="Github") - - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh') - - - - - -@CatchException -def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - disable_auto_promotion(chatbot) - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] # 清空历史,以免输入溢出 - success, file_manifest, project_folder = get_files_from_everything(txt) - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en') - - -@CatchException -def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - disable_auto_promotion(chatbot) - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import tiktoken - except: - report_execption(chatbot, history, - a=f"解析项目: {txt}", - b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - history = [] 
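The tiktoken dependency checked in these plugins backs the token counting that run_file_split uses further up: a file is kept whole when it fits the token budget and broken into segments otherwise. A rough standalone sketch of that budget check (the encoder choice and helper name are assumptions, not code from this project, and the real plugin splits on document structure rather than raw token offsets):

import tiktoken

def split_by_token_budget(text: str, max_tokens: int = 1024) -> list:
    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")
    tokens = enc.encode(text, disallowed_special=())
    if len(tokens) <= max_tokens:
        return [text]
    # naive fallback: cut on raw token boundaries
    return [enc.decode(tokens[i:i + max_tokens]) for i in range(0, len(tokens), max_tokens)]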
# 清空历史,以免输入溢出 - success, file_manifest, project_folder = get_files_from_everything(txt) - if not success: - # 什么都没有 - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - - if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg") - language = plugin_kwargs.get("advanced_arg", 'Chinese') - yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language=language) \ No newline at end of file diff --git a/spaces/LiuZiyi/1-video-video2txt-whisper-yt/app.py b/spaces/LiuZiyi/1-video-video2txt-whisper-yt/app.py deleted file mode 100644 index 914efc58e281850f11d294a8fd88f28057e6e144..0000000000000000000000000000000000000000 --- a/spaces/LiuZiyi/1-video-video2txt-whisper-yt/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import streamlit as st -import whisper -import os -from pytube import YouTube - -st.title("YouTube URL Video-to-Text") - -model = whisper.load_model("base") - -youtube_URL = st.text_input(label="Enter your YouTube URL") - -if youtube_URL: - # Get the video - video = YouTube(youtube_URL) - - # Convert video to audio - audio = video.streams.filter(only_audio=True).first() - output = audio.download(output_path = ".") - base, ext = os.path.splitext(output) - new_file = base + '.mp3' - - # Change the name of the file - os.rename(output, new_file) - - a = new_file - - # st.write(a) - result = model.transcribe(a) - st.write(result["text"]) -else: - st.error("☢️ Enter youtube URL") diff --git a/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/app.py b/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/app.py deleted file mode 100644 index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000 --- a/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch() \ No newline at end of file diff --git a/spaces/MAEBA96/SUMMARISER96/app.py b/spaces/MAEBA96/SUMMARISER96/app.py deleted file mode 100644 index c9e32a058fd184930cdf74bb3cf2f155a79b39cc..0000000000000000000000000000000000000000 --- a/spaces/MAEBA96/SUMMARISER96/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import gradio as gr -from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM - -def summarize_text(input_text, max_summary_length): - summarizer = pipeline( - task="summarization", - model="t5-base", - min_length=20, - max_length=1024, - truncation=True, - model_kwargs={"cache_dir": '/Documents/Huggin_Face/'} - ) - - # Generate the summary - output = summarizer(input_text, max_length=max_summary_length, min_length=30, do_sample=False) - summary = output[0]['summary_text'] - - return summary - -# Interface -iface = gr.Interface( - fn=summarize_text, - inputs=[ - gr.inputs.Textbox(lines=10, label="Upload or paste text", placeholder="Paste your text here..."), - gr.inputs.Slider(minimum=50, maximum=500, default=150, label="Max Summary Length"), - ], - outputs=gr.outputs.Textbox(label="NeuraShift AI Text Summarizer"), - layout="vertical", - theme="advanced", - live=True, - examples=None # Remove the examples -) - -iface.launch() diff --git 
a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/pix2pix_model.py b/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/pix2pix_model.py deleted file mode 100644 index 41d6df671752f11ab7001d5b1b3e82034c2e6493..0000000000000000000000000000000000000000 --- a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/pix2pix_model.py +++ /dev/null @@ -1,246 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import torch -import models.networks as networks -import util.util as util - - -class Pix2PixModel(torch.nn.Module): - @staticmethod - def modify_commandline_options(parser, is_train): - networks.modify_commandline_options(parser, is_train) - return parser - - def __init__(self, opt): - super().__init__() - self.opt = opt - self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() else torch.FloatTensor - self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() else torch.ByteTensor - - self.netG, self.netD, self.netE = self.initialize_networks(opt) - - # set loss functions - if opt.isTrain: - self.criterionGAN = networks.GANLoss(opt.gan_mode, tensor=self.FloatTensor, opt=self.opt) - self.criterionFeat = torch.nn.L1Loss() - if not opt.no_vgg_loss: - self.criterionVGG = networks.VGGLoss(self.opt.gpu_ids) - if opt.use_vae: - self.KLDLoss = networks.KLDLoss() - - # Entry point for all calls involving forward pass - # of deep networks. We used this approach since DataParallel module - # can't parallelize custom functions, we branch to different - # routines based on |mode|. - def forward(self, data, mode): - input_semantics, real_image, degraded_image = self.preprocess_input(data) - - if mode == "generator": - g_loss, generated = self.compute_generator_loss(input_semantics, degraded_image, real_image) - return g_loss, generated - elif mode == "discriminator": - d_loss = self.compute_discriminator_loss(input_semantics, degraded_image, real_image) - return d_loss - elif mode == "encode_only": - z, mu, logvar = self.encode_z(real_image) - return mu, logvar - elif mode == "inference": - with torch.no_grad(): - fake_image, _ = self.generate_fake(input_semantics, degraded_image, real_image) - return fake_image - else: - raise ValueError("|mode| is invalid") - - def create_optimizers(self, opt): - G_params = list(self.netG.parameters()) - if opt.use_vae: - G_params += list(self.netE.parameters()) - if opt.isTrain: - D_params = list(self.netD.parameters()) - - beta1, beta2 = opt.beta1, opt.beta2 - if opt.no_TTUR: - G_lr, D_lr = opt.lr, opt.lr - else: - G_lr, D_lr = opt.lr / 2, opt.lr * 2 - - optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2)) - optimizer_D = torch.optim.Adam(D_params, lr=D_lr, betas=(beta1, beta2)) - - return optimizer_G, optimizer_D - - def save(self, epoch): - util.save_network(self.netG, "G", epoch, self.opt) - util.save_network(self.netD, "D", epoch, self.opt) - if self.opt.use_vae: - util.save_network(self.netE, "E", epoch, self.opt) - - ############################################################################ - # Private helper methods - ############################################################################ - - def initialize_networks(self, opt): - netG = networks.define_G(opt) - netD = networks.define_D(opt) if opt.isTrain else None - netE = networks.define_E(opt) if opt.use_vae else None - - if not opt.isTrain or opt.continue_train: - netG = util.load_network(netG, "G", opt.which_epoch, opt) - if opt.isTrain: - netD = util.load_network(netD, "D", 
opt.which_epoch, opt) - if opt.use_vae: - netE = util.load_network(netE, "E", opt.which_epoch, opt) - - return netG, netD, netE - - # preprocess the input, such as moving the tensors to GPUs and - # transforming the label map to one-hot encoding - # |data|: dictionary of the input data - - def preprocess_input(self, data): - # move to GPU and change data types - # data['label'] = data['label'].long() - - if not self.opt.isTrain: - if self.use_gpu(): - data["label"] = data["label"].cuda() - data["image"] = data["image"].cuda() - return data["label"], data["image"], data["image"] - - ## While testing, the input image is the degraded face - if self.use_gpu(): - data["label"] = data["label"].cuda() - data["degraded_image"] = data["degraded_image"].cuda() - data["image"] = data["image"].cuda() - - # # create one-hot label map - # label_map = data['label'] - # bs, _, h, w = label_map.size() - # nc = self.opt.label_nc + 1 if self.opt.contain_dontcare_label \ - # else self.opt.label_nc - # input_label = self.FloatTensor(bs, nc, h, w).zero_() - # input_semantics = input_label.scatter_(1, label_map, 1.0) - - return data["label"], data["image"], data["degraded_image"] - - def compute_generator_loss(self, input_semantics, degraded_image, real_image): - G_losses = {} - - fake_image, KLD_loss = self.generate_fake( - input_semantics, degraded_image, real_image, compute_kld_loss=self.opt.use_vae - ) - - if self.opt.use_vae: - G_losses["KLD"] = KLD_loss - - pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image) - - G_losses["GAN"] = self.criterionGAN(pred_fake, True, for_discriminator=False) - - if not self.opt.no_ganFeat_loss: - num_D = len(pred_fake) - GAN_Feat_loss = self.FloatTensor(1).fill_(0) - for i in range(num_D): # for each discriminator - # last output is the final prediction, so we exclude it - num_intermediate_outputs = len(pred_fake[i]) - 1 - for j in range(num_intermediate_outputs): # for each layer output - unweighted_loss = self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) - GAN_Feat_loss += unweighted_loss * self.opt.lambda_feat / num_D - G_losses["GAN_Feat"] = GAN_Feat_loss - - if not self.opt.no_vgg_loss: - G_losses["VGG"] = self.criterionVGG(fake_image, real_image) * self.opt.lambda_vgg - - return G_losses, fake_image - - def compute_discriminator_loss(self, input_semantics, degraded_image, real_image): - D_losses = {} - with torch.no_grad(): - fake_image, _ = self.generate_fake(input_semantics, degraded_image, real_image) - fake_image = fake_image.detach() - fake_image.requires_grad_() - - pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image) - - D_losses["D_Fake"] = self.criterionGAN(pred_fake, False, for_discriminator=True) - D_losses["D_real"] = self.criterionGAN(pred_real, True, for_discriminator=True) - - return D_losses - - def encode_z(self, real_image): - mu, logvar = self.netE(real_image) - z = self.reparameterize(mu, logvar) - return z, mu, logvar - - def generate_fake(self, input_semantics, degraded_image, real_image, compute_kld_loss=False): - z = None - KLD_loss = None - if self.opt.use_vae: - z, mu, logvar = self.encode_z(real_image) - if compute_kld_loss: - KLD_loss = self.KLDLoss(mu, logvar) * self.opt.lambda_kld - - fake_image = self.netG(input_semantics, degraded_image, z=z) - - assert ( - not compute_kld_loss - ) or self.opt.use_vae, "You cannot compute KLD loss if opt.use_vae == False" - - return fake_image, KLD_loss - - # Given fake and real image, return the prediction of discriminator - # for 
each fake and real image. - - def discriminate(self, input_semantics, fake_image, real_image): - - if self.opt.no_parsing_map: - fake_concat = fake_image - real_concat = real_image - else: - fake_concat = torch.cat([input_semantics, fake_image], dim=1) - real_concat = torch.cat([input_semantics, real_image], dim=1) - - # In Batch Normalization, the fake and real images are - # recommended to be in the same batch to avoid disparate - # statistics in fake and real images. - # So both fake and real images are fed to D all at once. - fake_and_real = torch.cat([fake_concat, real_concat], dim=0) - - discriminator_out = self.netD(fake_and_real) - - pred_fake, pred_real = self.divide_pred(discriminator_out) - - return pred_fake, pred_real - - # Take the prediction of fake and real images from the combined batch - def divide_pred(self, pred): - # the prediction contains the intermediate outputs of multiscale GAN, - # so it's usually a list - if type(pred) == list: - fake = [] - real = [] - for p in pred: - fake.append([tensor[: tensor.size(0) // 2] for tensor in p]) - real.append([tensor[tensor.size(0) // 2 :] for tensor in p]) - else: - fake = pred[: pred.size(0) // 2] - real = pred[pred.size(0) // 2 :] - - return fake, real - - def get_edges(self, t): - edge = self.ByteTensor(t.size()).zero_() - edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1]) - edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1]) - edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]) - edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :]) - return edge.float() - - def reparameterize(self, mu, logvar): - std = torch.exp(0.5 * logvar) - eps = torch.randn_like(std) - return eps.mul(std) + mu - - def use_gpu(self): - return len(self.opt.gpu_ids) > 0 diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/bert/bert-base-japanese-v3/README.md b/spaces/Mahiruoshi/MyGO_VIts-bert/bert/bert-base-japanese-v3/README.md deleted file mode 100644 index 630f3eca8d375fe0242a0b5c6d2686e2241a780f..0000000000000000000000000000000000000000 --- a/spaces/Mahiruoshi/MyGO_VIts-bert/bert/bert-base-japanese-v3/README.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -license: apache-2.0 -datasets: -- cc100 -- wikipedia -language: -- ja -widget: -- text: 東北大学で[MASK]の研究をしています。 ---- - -# BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102) - -This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language. - -This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by the WordPiece subword tokenization. -Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective. - -The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/). - -## Model architecture - -The model architecture is the same as the original BERT base model; 12 layers, 768 dimensions of hidden states, and 12 attention heads. - -## Training Data - -The model is trained on the Japanese portion of [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia. -For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023. 
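For reference, a minimal fill-mask sketch for a card like this one, assuming the transformers, fugashi and unidic-lite packages it mentions are installed; the repository id below is inferred from the card's path and is an assumption, not part of the original text:

from transformers import pipeline

fill_mask = pipeline("fill-mask", model="cl-tohoku/bert-base-japanese-v3")
for candidate in fill_mask("東北大学で[MASK]の研究をしています。", top_k=3):
    print(candidate["token_str"], candidate["score"])

The example sentence is the same one used in the card's widget configuration.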
-The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively. - -For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7). - -## Tokenization - -The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm. -The vocabulary size is 32768. - -We used [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization. - -## Training - -We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps. -For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once. - -For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/). - -## Licenses - -The pretrained models are distributed under the Apache License 2.0. - -## Acknowledgments - -This model is trained with Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/) program. \ No newline at end of file diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/utils/se_layer.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/utils/se_layer.py deleted file mode 100644 index 083bd7d1ccee909c900c7aed2cc928bf14727f3e..0000000000000000000000000000000000000000 --- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/utils/se_layer.py +++ /dev/null @@ -1,57 +0,0 @@ -import annotator.uniformer.mmcv as mmcv -import torch.nn as nn -from annotator.uniformer.mmcv.cnn import ConvModule - -from .make_divisible import make_divisible - - -class SELayer(nn.Module): - """Squeeze-and-Excitation Module. - - Args: - channels (int): The input (and output) channels of the SE layer. - ratio (int): Squeeze ratio in SELayer, the intermediate channel will be - ``int(channels/ratio)``. Default: 16. - conv_cfg (None or dict): Config dict for convolution layer. - Default: None, which means using conv2d. - act_cfg (dict or Sequence[dict]): Config dict for activation layer. - If act_cfg is a dict, two activation layers will be configured - by this dict. If act_cfg is a sequence of dicts, the first - activation layer will be configured by the first dict and the - second activation layer will be configured by the second dict. - Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0, - divisor=6.0)). 
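Stripped of the mmcv ConvModule plumbing, the squeeze-and-excitation computation this SELayer performs is compact: global average pooling, a channel-reduction 1x1 convolution, a channel-expansion 1x1 convolution, and a gating activation. A hedged plain-PyTorch sketch with illustrative sizes, not a drop-in replacement for the class:

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinySE(nn.Module):
    def __init__(self, channels: int = 64, ratio: int = 16):
        super().__init__()
        mid = max(8, channels // ratio)
        self.fc1 = nn.Conv2d(channels, mid, kernel_size=1)
        self.fc2 = nn.Conv2d(mid, channels, kernel_size=1)

    def forward(self, x):
        w = F.adaptive_avg_pool2d(x, 1)      # squeeze: one value per channel
        w = F.relu(self.fc1(w))
        w = F.hardsigmoid(self.fc2(w))       # excitation: per-channel gate in [0, 1]
        return x * w                         # rescale the input feature map

F.hardsigmoid matches the HSigmoid(bias=3.0, divisor=6.0) default shown above, i.e. clip((x + 3) / 6, 0, 1).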
- """ - - def __init__(self, - channels, - ratio=16, - conv_cfg=None, - act_cfg=(dict(type='ReLU'), - dict(type='HSigmoid', bias=3.0, divisor=6.0))): - super(SELayer, self).__init__() - if isinstance(act_cfg, dict): - act_cfg = (act_cfg, act_cfg) - assert len(act_cfg) == 2 - assert mmcv.is_tuple_of(act_cfg, dict) - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.conv1 = ConvModule( - in_channels=channels, - out_channels=make_divisible(channels // ratio, 8), - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[0]) - self.conv2 = ConvModule( - in_channels=make_divisible(channels // ratio, 8), - out_channels=channels, - kernel_size=1, - stride=1, - conv_cfg=conv_cfg, - act_cfg=act_cfg[1]) - - def forward(self, x): - out = self.global_avgpool(x) - out = self.conv1(out) - out = self.conv2(out) - return x * out diff --git a/spaces/Menna2211/ImCaptioning/pages/image-captioning.py b/spaces/Menna2211/ImCaptioning/pages/image-captioning.py deleted file mode 100644 index 55cd8d491982883341f7a55626f998e521908219..0000000000000000000000000000000000000000 --- a/spaces/Menna2211/ImCaptioning/pages/image-captioning.py +++ /dev/null @@ -1,116 +0,0 @@ -from transformers import BertTokenizer -import torch -import time -import streamlit as st -from PIL import Image -import torchvision.transforms as transforms -import requests -from transformers import BlipProcessor, BlipForConditionalGeneration - - -tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') -start_token = tokenizer.convert_tokens_to_ids(tokenizer._cls_token) -end_token = tokenizer.convert_tokens_to_ids(tokenizer._sep_token) -def create_caption_and_mask(start_token, max_length): - caption_template = torch.zeros((1, max_length), dtype=torch.long) - mask_template = torch.ones((1, max_length), dtype=torch.bool) - caption_template[:, 0] = start_token - mask_template[:, 0] = False - return caption_template, mask_template - -caption, cap_mask = create_caption_and_mask(start_token, 128) - -# Model 1 -@st.cache_resource(show_spinner=False ,ttl=3600) -def get_model1(): - processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base") - model1 = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base") - return processor, model1 - -processor, model1 =get_model1() - -# Model 2 -@st.cache_resource(show_spinner=False ,ttl=3600) -def get_model2(): - model2 = torch.hub.load('saahiluppal/catr', 'v3', pretrained=True) # you can choose between v1, v2 and v3 - return model2 - -model2 =get_model2() - -st.title("Image Captioning App") -# define the layout of your app -uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"]) -model = st.selectbox("Select a Model", ["Select a Model","Hugging-Face", "Github"]) -submit_button = st.button("Compute") -if model == "Select a Model" and not submit_button : - st.stop() -elif model == "Select a Model" and submit_button : - st.warning('Warning.....!!,Plz..... Select a Model ', icon="⚠️") - -if model == "Hugging-Face": - if uploaded_file is not None : - # Load the uploaded image - image = Image.open(uploaded_file).convert('RGB') - st.image(image) - # Use the pre-trained model to generate a caption for the uploaded image - progress_text = "Operation in progress. Please wait." 
- bar = st.progress(0, text=progress_text) - for percent_complete in range(100): - inputs = processor(image, return_tensors="pt") - out = model1.generate(**inputs , max_new_tokens=100) - time.sleep(0.1) - bar.progress(percent_complete + 1, text=progress_text) - - # Display the uploaded image and its generated caption - st.write("Generated Caption:") - st.write(processor.decode(out[0], skip_special_tokens=True)) - time.sleep(5) - st.success('Congratulations..!! task is done ', icon="✅") - st.balloons() - else: - st.error('Error...!!,Plz..... Upload an image' , icon="🚨") - -elif model == "Github": - if uploaded_file is not None : - # Load the uploaded image - im = Image.open(uploaded_file) - st.image(im) - # Preprocess the input image - transform = transforms.Compose([ - transforms.Resize((224, 224)), # Resize the image to 224x224 - transforms.ToTensor(), # Convert the image to a tensor - transforms.Normalize( # Normalize the image - mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225])]) - image = transform(im).unsqueeze(0) # Add a batch dimension - #@torch.no_grad() - def evaluate(): - for i in range(128-1): - predictions = model2(image, caption, cap_mask) - predictions = predictions[:, i, :] - predicted_id = torch.argmax(predictions, axis=-1) - - if predicted_id[0] == 102: - return caption - caption[:, i+1] = predicted_id[0] - cap_mask[:, i+1] = False - - return caption - - # Use the pre-trained model to generate a caption for the uploaded image - progress_text = "Operation in progress. Please wait." - bar = st.progress(0, text=progress_text) - for percent_complete in range(100): - output = evaluate() - time.sleep(0.1) - bar.progress(percent_complete + 1, text=progress_text) - - # Display the uploaded image and its generated caption - st.write("Generated Caption:") - result = tokenizer.decode(output[0].tolist(), skip_special_tokens=True) - st.write(result.capitalize()) - time.sleep(5) - st.success('Congratulations...!! task is done ', icon="✅") - st.balloons() - else: - st.error('Error...!!,Plz..... Upload an image' , icon="🚨") diff --git a/spaces/MestikonAgency/README/model.py b/spaces/MestikonAgency/README/model.py deleted file mode 100644 index 25a4bad7cf57b7d464ee1209874cfe24deaae78f..0000000000000000000000000000000000000000 --- a/spaces/MestikonAgency/README/model.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement. - -import math -from dataclasses import dataclass -from typing import Optional, Tuple - -import fairscale.nn.model_parallel.initialize as fs_init -import torch -import torch.nn.functional as F -from fairscale.nn.model_parallel.layers import ( - ColumnParallelLinear, - ParallelEmbedding, - RowParallelLinear, -) -from torch import nn - - -@dataclass -class ModelArgs: - dim: int = 4096 - n_layers: int = 32 - n_heads: int = 32 - n_kv_heads: Optional[int] = None - vocab_size: int = -1 # defined later by tokenizer - multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2 - ffn_dim_multiplier: Optional[float] = None - norm_eps: float = 1e-5 - - max_batch_size: int = 32 - max_seq_len: int = 2048 - - -class RMSNorm(torch.nn.Module): - def __init__(self, dim: int, eps: float = 1e-6): - """ - Initialize the RMSNorm normalization layer. - - Args: - dim (int): The dimension of the input tensor. - eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6. 
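RMSNorm rescales each feature vector by its root mean square instead of subtracting a mean and dividing by a standard deviation: y = x / sqrt(mean(x^2) + eps) * weight. A quick standalone check of that definition, with illustrative shapes:

import torch

x = torch.randn(2, 8)
weight = torch.ones(8)
eps = 1e-6

y = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + eps) * weight
print(y.pow(2).mean(-1))   # ~1.0 per row: only the scale changes, no mean subtraction, no bias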
- - Attributes: - eps (float): A small value added to the denominator for numerical stability. - weight (nn.Parameter): Learnable scaling parameter. - - """ - super().__init__() - self.eps = eps - self.weight = nn.Parameter(torch.ones(dim)) - - def _norm(self, x): - """ - Apply the RMSNorm normalization to the input tensor. - - Args: - x (torch.Tensor): The input tensor. - - Returns: - torch.Tensor: The normalized tensor. - - """ - return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) - - def forward(self, x): - """ - Forward pass through the RMSNorm layer. - - Args: - x (torch.Tensor): The input tensor. - - Returns: - torch.Tensor: The output tensor after applying RMSNorm. - - """ - output = self._norm(x.float()).type_as(x) - return output * self.weight - - -def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0): - """ - Precompute the frequency tensor for complex exponentials (cis) with given dimensions. - - This function calculates a frequency tensor with complex exponentials using the given dimension 'dim' - and the end index 'end'. The 'theta' parameter scales the frequencies. - The returned tensor contains complex values in complex64 data type. - - Args: - dim (int): Dimension of the frequency tensor. - end (int): End index for precomputing frequencies. - theta (float, optional): Scaling factor for frequency computation. Defaults to 10000.0. - - Returns: - torch.Tensor: Precomputed frequency tensor with complex exponentials. - - - - - """ - freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim)) - t = torch.arange(end, device=freqs.device) # type: ignore - freqs = torch.outer(t, freqs).float() # type: ignore - freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64 - return freqs_cis - - -def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor): - """ - Reshape frequency tensor for broadcasting it with another tensor. - - This function reshapes the frequency tensor to have the same shape as the target tensor 'x' - for the purpose of broadcasting the frequency tensor during element-wise operations. - - Args: - freqs_cis (torch.Tensor): Frequency tensor to be reshaped. - x (torch.Tensor): Target tensor for broadcasting compatibility. - - Returns: - torch.Tensor: Reshaped frequency tensor. - - Raises: - AssertionError: If the frequency tensor doesn't match the expected shape. - AssertionError: If the target tensor 'x' doesn't have the expected number of dimensions. - """ - ndim = x.ndim - assert 0 <= 1 < ndim - assert freqs_cis.shape == (x.shape[1], x.shape[-1]) - shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)] - return freqs_cis.view(*shape) - - -def apply_rotary_emb( - xq: torch.Tensor, - xk: torch.Tensor, - freqs_cis: torch.Tensor, -) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Apply rotary embeddings to input tensors using the given frequency tensor. - - This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided - frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor - is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are - returned as real tensors. - - Args: - xq (torch.Tensor): Query tensor to apply rotary embeddings. - xk (torch.Tensor): Key tensor to apply rotary embeddings. - freqs_cis (torch.Tensor): Precomputed frequency tensor for complex exponentials. 
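The rotation is easiest to see on a single sequence: consecutive feature pairs are reinterpreted as complex numbers and multiplied by unit-magnitude phases whose angles grow linearly with position, using the same frequency schedule as precompute_freqs_cis above. A small self-contained sketch with illustrative sizes:

import torch

dim, seq_len, theta = 8, 4, 10000.0
freqs = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))   # (dim/2,) inverse frequencies
angles = torch.outer(torch.arange(seq_len).float(), freqs)         # (seq_len, dim/2) angles t * freq
freqs_cis = torch.polar(torch.ones_like(angles), angles)           # unit complex phases

x = torch.randn(seq_len, dim)
x_complex = torch.view_as_complex(x.reshape(seq_len, dim // 2, 2))
x_rotated = torch.view_as_real(x_complex * freqs_cis).flatten(1)   # back to (seq_len, dim)

Because every phase has magnitude 1, the rotation changes relative orientation between positions but leaves vector norms untouched.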
- - Returns: - Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings. - - - - """ - xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) - xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) - freqs_cis = reshape_for_broadcast(freqs_cis, xq_) - xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) - xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) - return xq_out.type_as(xq), xk_out.type_as(xk) - - -def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor: - """torch.repeat_interleave(x, dim=2, repeats=n_rep)""" - bs, slen, n_kv_heads, head_dim = x.shape - if n_rep == 1: - return x - return ( - x[:, :, :, None, :] - .expand(bs, slen, n_kv_heads, n_rep, head_dim) - .reshape(bs, slen, n_kv_heads * n_rep, head_dim) - ) - - -class Attention(nn.Module): - """Multi-head attention module.""" - def __init__(self, args: ModelArgs): - """ - Initialize the Attention module. - - Args: - args (ModelArgs): Model configuration parameters. - - Attributes: - n_kv_heads (int): Number of key and value heads. - n_local_heads (int): Number of local query heads. - n_local_kv_heads (int): Number of local key and value heads. - n_rep (int): Number of repetitions for local heads. - head_dim (int): Dimension size of each attention head. - wq (ColumnParallelLinear): Linear transformation for queries. - wk (ColumnParallelLinear): Linear transformation for keys. - wv (ColumnParallelLinear): Linear transformation for values. - wo (RowParallelLinear): Linear transformation for output. - cache_k (torch.Tensor): Cached keys for attention. - cache_v (torch.Tensor): Cached values for attention. - - """ - super().__init__() - self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads - model_parallel_size = fs_init.get_model_parallel_world_size() - self.n_local_heads = args.n_heads // model_parallel_size - self.n_local_kv_heads = self.n_kv_heads // model_parallel_size - self.n_rep = self.n_local_heads // self.n_local_kv_heads - self.head_dim = args.dim // args.n_heads - - self.wq = ColumnParallelLinear( - args.dim, - args.n_heads * self.head_dim, - bias=False, - gather_output=False, - init_method=lambda x: x, - ) - self.wk = ColumnParallelLinear( - args.dim, - self.n_kv_heads * self.head_dim, - bias=False, - gather_output=False, - init_method=lambda x: x, - ) - self.wv = ColumnParallelLinear( - args.dim, - self.n_kv_heads * self.head_dim, - bias=False, - gather_output=False, - init_method=lambda x: x, - ) - self.wo = RowParallelLinear( - args.n_heads * self.head_dim, - args.dim, - bias=False, - input_is_parallel=True, - init_method=lambda x: x, - ) - - self.cache_k = torch.zeros( - ( - args.max_batch_size, - args.max_seq_len, - self.n_local_kv_heads, - self.head_dim, - ) - ).cuda() - self.cache_v = torch.zeros( - ( - args.max_batch_size, - args.max_seq_len, - self.n_local_kv_heads, - self.head_dim, - ) - ).cuda() - - def forward( - self, - x: torch.Tensor, - start_pos: int, - freqs_cis: torch.Tensor, - mask: Optional[torch.Tensor], - ): - """ - Forward pass of the attention module. - - Args: - x (torch.Tensor): Input tensor. - start_pos (int): Starting position for caching. - freqs_cis (torch.Tensor): Precomputed frequency tensor. - mask (torch.Tensor, optional): Attention mask tensor. - - Returns: - torch.Tensor: Output tensor after attention. 
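When n_kv_heads is smaller than n_heads, repeat_kv lets a few cached key/value heads serve many query heads; the expand-and-reshape used above is equivalent to torch.repeat_interleave along the head dimension. A shape-level sketch of that equivalence:

import torch

bs, slen, n_kv_heads, head_dim, n_rep = 1, 3, 2, 4, 4   # 2 KV heads shared across 8 query heads

x = torch.randn(bs, slen, n_kv_heads, head_dim)
expanded = (
    x[:, :, :, None, :]
    .expand(bs, slen, n_kv_heads, n_rep, head_dim)
    .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
)
reference = torch.repeat_interleave(x, repeats=n_rep, dim=2)
assert torch.equal(expanded, reference)

The expand step is only a view, so the extra memory is paid when the reshape materializes the repeated heads.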
- - """ - bsz, seqlen, _ = x.shape - xq, xk, xv = self.wq(x), self.wk(x), self.wv(x) - - xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim) - xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim) - xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim) - - xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis) - - self.cache_k = self.cache_k.to(xq) - self.cache_v = self.cache_v.to(xq) - - self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk - self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv - - keys = self.cache_k[:bsz, : start_pos + seqlen] - values = self.cache_v[:bsz, : start_pos + seqlen] - - # repeat k/v heads if n_kv_heads < n_heads - keys = repeat_kv(keys, self.n_rep) # (bs, seqlen, n_local_heads, head_dim) - values = repeat_kv(values, self.n_rep) # (bs, seqlen, n_local_heads, head_dim) - - xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim) - keys = keys.transpose(1, 2) - values = values.transpose(1, 2) - scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim) - if mask is not None: - scores = scores + mask # (bs, n_local_heads, seqlen, cache_len + seqlen) - scores = F.softmax(scores.float(), dim=-1).type_as(xq) - output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim) - output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1) - return self.wo(output) - - -class FeedForward(nn.Module): - def __init__( - self, - dim: int, - hidden_dim: int, - multiple_of: int, - ffn_dim_multiplier: Optional[float], - ): - """ - Initialize the FeedForward module. - - Args: - dim (int): Input dimension. - hidden_dim (int): Hidden dimension of the feedforward layer. - multiple_of (int): Value to ensure hidden dimension is a multiple of this value. - ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. Defaults to None. - - Attributes: - w1 (ColumnParallelLinear): Linear transformation for the first layer. - w2 (RowParallelLinear): Linear transformation for the second layer. - w3 (ColumnParallelLinear): Linear transformation for the third layer. - - """ - super().__init__() - hidden_dim = int(2 * hidden_dim / 3) - # custom dim factor multiplier - if ffn_dim_multiplier is not None: - hidden_dim = int(ffn_dim_multiplier * hidden_dim) - hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) - - self.w1 = ColumnParallelLinear( - dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x - ) - self.w2 = RowParallelLinear( - hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x - ) - self.w3 = ColumnParallelLinear( - dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x - ) - - def forward(self, x): - return self.w2(F.silu(self.w1(x)) * self.w3(x)) - - -class TransformerBlock(nn.Module): - def __init__(self, layer_id: int, args: ModelArgs): - """ - Initialize a TransformerBlock. - - Args: - layer_id (int): Identifier for the layer. - args (ModelArgs): Model configuration parameters. - - Attributes: - n_heads (int): Number of attention heads. - dim (int): Dimension size of the model. - head_dim (int): Dimension size of each attention head. - attention (Attention): Attention module. - feed_forward (FeedForward): FeedForward module. - layer_id (int): Identifier for the layer. - attention_norm (RMSNorm): Layer normalization for attention output. - ffn_norm (RMSNorm): Layer normalization for feedforward output. 
- - """ - super().__init__() - self.n_heads = args.n_heads - self.dim = args.dim - self.head_dim = args.dim // args.n_heads - self.attention = Attention(args) - self.feed_forward = FeedForward( - dim=args.dim, - hidden_dim=4 * args.dim, - multiple_of=args.multiple_of, - ffn_dim_multiplier=args.ffn_dim_multiplier, - ) - self.layer_id = layer_id - self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps) - self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps) - - def forward( - self, - x: torch.Tensor, - start_pos: int, - freqs_cis: torch.Tensor, - mask: Optional[torch.Tensor], - ): - """ - Perform a forward pass through the TransformerBlock. - - Args: - x (torch.Tensor): Input tensor. - start_pos (int): Starting position for attention caching. - freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies. - mask (torch.Tensor, optional): Masking tensor for attention. Defaults to None. - - Returns: - torch.Tensor: Output tensor after applying attention and feedforward layers. - - """ - h = x + self.attention.forward( - self.attention_norm(x), start_pos, freqs_cis, mask - ) - out = h + self.feed_forward.forward(self.ffn_norm(h)) - return out - - -class Transformer(nn.Module): - def __init__(self, params: ModelArgs): - """ - Initialize a Transformer model. - - Args: - params (ModelArgs): Model configuration parameters. - - Attributes: - params (ModelArgs): Model configuration parameters. - vocab_size (int): Vocabulary size. - n_layers (int): Number of layers in the model. - tok_embeddings (ParallelEmbedding): Token embeddings. - layers (torch.nn.ModuleList): List of Transformer blocks. - norm (RMSNorm): Layer normalization for the model output. - output (ColumnParallelLinear): Linear layer for final output. - freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies. - - """ - super().__init__() - self.params = params - self.vocab_size = params.vocab_size - self.n_layers = params.n_layers - - self.tok_embeddings = ParallelEmbedding( - params.vocab_size, params.dim, init_method=lambda x: x - ) - - self.layers = torch.nn.ModuleList() - for layer_id in range(params.n_layers): - self.layers.append(TransformerBlock(layer_id, params)) - - self.norm = RMSNorm(params.dim, eps=params.norm_eps) - self.output = ColumnParallelLinear( - params.dim, params.vocab_size, bias=False, init_method=lambda x: x - ) - - self.freqs_cis = precompute_freqs_cis( - self.params.dim // self.params.n_heads, self.params.max_seq_len * 2 - ) - - @torch.inference_mode() - def forward(self, tokens: torch.Tensor, start_pos: int): - """ - Perform a forward pass through the Transformer model. - - Args: - tokens (torch.Tensor): Input token indices. - start_pos (int): Starting position for attention caching. - - Returns: - torch.Tensor: Output logits after applying the Transformer model. 
- - """ - _bsz, seqlen = tokens.shape - h = self.tok_embeddings(tokens) - self.freqs_cis = self.freqs_cis.to(h.device) - freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen] - - mask = None - if seqlen > 1: - mask = torch.full( - (1, 1, seqlen, seqlen), float("-inf"), device=tokens.device - ) - mask = torch.triu(mask, diagonal=start_pos + 1).type_as(h) - - for layer in self.layers: - h = layer(h, start_pos, freqs_cis, mask) - h = self.norm(h) - output = self.output(h).float() - return output diff --git a/spaces/Metatron/LEO/Dockerfile b/spaces/Metatron/LEO/Dockerfile deleted file mode 100644 index 29ec24bfb63cdbf2c92fc41c33e24b329aa6e1ca..0000000000000000000000000000000000000000 --- a/spaces/Metatron/LEO/Dockerfile +++ /dev/null @@ -1,65 +0,0 @@ -FROM zenmldocker/zenml-server:latest - -ENV ZENML_ANALYTICS_OPT_IN=true -ENV ZENML_SERVER_DEPLOYMENT_TYPE="hf_spaces" -ENV ZENML_LOGGING_VERBOSITY=DEBUG - -################################################################################ -# -# CONFIGURING YOUR ZENML HF SPACES SERVER -# --------------------------------------- -# By default this space is not persistent. All ZenML metadata is stored in -# localstorage in a SQLite database. If you would like to make your storage -# persistent, use the appropriate environment variables below to configure the -# image to use a MySQL-compatible database service that is reachable from the -# container. See https://docs.zenml.io/getting-started/deploying-zenml/docker -# for more information on how to configure these environment variables. - -# You can also configure the secrets store to use for your ZenML server. Be -# sure to use Huggingface Spaces' 'Repository Secrets' feature to store any -# secrets referenced here. See -# https://huggingface.co/docs/hub/spaces-overview#managing-secrets for more -# information on how to configure these environment variables. 
- -# ENV ZENML_DEFAULT_PROJECT_NAME="" -# ENV ZENML_DEFAULT_USER_NAME="" -# ENV ZENML_DEFAULT_USER_PASSWORD="" -# ENV ZENML_STORE_URL="" -# ENV ZENML_STORE_SSL_CA="" -# ENV ZENML_STORE_SSL_CERT="" -# ENV ZENML_STORE_SSL_KEY="" -# ENV ZENML_STORE_SSL_VERIFY_SERVER_CERT="" - -# ENV ZENML_LOGGING_VERBOSITY="" - -# # SECRETS STORE CONFIGURATION -# ENV ZENML_SECRETS_STORE_TYPE="" -# ENV ZENML_SECRETS_STORE_ENCRYPTION_KEY="" -# ENV ZENML_SECRETS_STORE_CLASS_PATH="" -# ENV ZENML_JWT_SECRET_KEY="" - -# # AWS Secrets Store Configuration -# ENV ZENML_SECRETS_STORE_REGION_NAME="" -# ENV ZENML_SECRETS_STORE_AWS_ACCESS_KEY_ID="" -# ENV ZENML_SECRETS_STORE_AWS_SECRET_ACCESS_KEY="" -# ENV ZENML_SECRETS_STORE_AWS_SESSION_TOKEN="" -# ENV ZENML_SECRETS_STORE_SECRET_LIST_REFRESH_TIMEOUT="" - -# # GCP Secrets Store Configuration -# ENV ZENML_SECRETS_STORE_PROJECT_ID="" -# ENV GOOGLE_APPLICATION_CREDENTIALS="" - -# # Azure Secrets Store Configuration -# ENV ZENML_SECRETS_STORE_KEY_VAULT_NAME="" -# ENV ZENML_SECRETS_STORE_AZURE_CLIENT_ID="" -# ENV ZENML_SECRETS_STORE_AZURE_CLIENT_SECRET="" -# ENV ZENML_SECRETS_STORE_AZURE_TENANT_ID="" - -# # Hashicorp Secrets Store Configuration -# ENV ZENML_SECRETS_STORE_VAULT_ADDR="" -# ENV ZENML_SECRETS_STORE_VAULT_TOKEN="" -# ENV ZENML_SECRETS_STORE_VAULT_NAMESPACE="" -# ENV ZENML_SECRETS_STORE_MAX_VERSIONS="" - -ENTRYPOINT ["uvicorn", "zenml.zen_server.zen_server_api:app", "--log-level", "debug"] -CMD ["--proxy-headers", "--port", "8080", "--host", "0.0.0.0"] diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/__init__.py b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Mountchicken/MAERec-Gradio/tools/dist_test.sh b/spaces/Mountchicken/MAERec-Gradio/tools/dist_test.sh deleted file mode 100644 index dea131b43ea8f1222661d20603d40c18ea7f28a1..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/tools/dist_test.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -CONFIG=$1 -CHECKPOINT=$2 -GPUS=$3 -NNODES=${NNODES:-1} -NODE_RANK=${NODE_RANK:-0} -PORT=${PORT:-29500} -MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} - -PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ -python -m torch.distributed.launch \ - --nnodes=$NNODES \ - --node_rank=$NODE_RANK \ - --master_addr=$MASTER_ADDR \ - --nproc_per_node=$GPUS \ - --master_port=$PORT \ - $(dirname "$0")/test.py \ - $CONFIG \ - $CHECKPOINT \ - --launcher pytorch \ - ${@:4} diff --git a/spaces/MrSinan/Reconstruction/fit_ellipse.py b/spaces/MrSinan/Reconstruction/fit_ellipse.py deleted file mode 100644 index c17e1201bbb3ab8ae5484a417e657abac194d2fc..0000000000000000000000000000000000000000 --- a/spaces/MrSinan/Reconstruction/fit_ellipse.py +++ /dev/null @@ -1,64 +0,0 @@ -# Author: aqeelanwar -# Created: 4 May,2020, 1:30 AM -# Email: aqeel.anwar@gatech.edu - -import numpy as np -from numpy.linalg import eig, inv - -def fitEllipse(x,y): - x = x[:,np.newaxis] - y = y[:,np.newaxis] - D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x))) - S = np.dot(D.T,D) - C = np.zeros([6,6]) - C[0,2] = C[2,0] = 2; C[1,1] = -1 - E, V = eig(np.dot(inv(S), C)) - n = np.argmax(np.abs(E)) - a = V[:,n] - return a - -def ellipse_center(a): - b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0] - num = b*b-a*c - x0=(c*d-b*f)/num - y0=(a*f-b*d)/num - return np.array([x0,y0]) - - -def ellipse_angle_of_rotation( a ): - b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], 
a[0] - return 0.5*np.arctan(2*b/(a-c)) - - -def ellipse_axis_length( a ): - b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0] - up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g) - down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a)) - down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a)) - res1=np.sqrt(up/down1) - res2=np.sqrt(up/down2) - return np.array([res1, res2]) - -def ellipse_angle_of_rotation2( a ): - b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0] - if b == 0: - if a > c: - return 0 - else: - return np.pi/2 - else: - if a > c: - return np.arctan(2*b/(a-c))/2 - else: - return np.pi/2 + np.arctan(2*b/(a-c))/2 - -# a = fitEllipse(x,y) -# center = ellipse_center(a) -# #phi = ellipse_angle_of_rotation(a) -# phi = ellipse_angle_of_rotation2(a) -# axes = ellipse_axis_length(a) -# -# print("center = ", center) -# print("angle of rotation = ", phi) -# print("axes = ", axes) - diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/box_coder.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/box_coder.py deleted file mode 100644 index f20ac956dfbce1fa69d1b9e6f5b023b704e1ec8a..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/box_coder.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Base box coder. - -Box coders convert between coordinate frames, namely image-centric -(with (0,0) on the top left of image) and anchor-centric (with (0,0) being -defined by a specific anchor). - -Users of a BoxCoder can call two methods: - encode: which encodes a box with respect to a given anchor - (or rather, a tensor of boxes wrt a corresponding tensor of anchors) and - decode: which inverts this encoding with a decode operation. -In both cases, the arguments are assumed to be in 1-1 correspondence already; -it is not the job of a BoxCoder to perform matching. -""" -from abc import ABCMeta -from abc import abstractmethod -from abc import abstractproperty - -import tensorflow as tf - - -# Box coder types. -FASTER_RCNN = 'faster_rcnn' -KEYPOINT = 'keypoint' -MEAN_STDDEV = 'mean_stddev' -SQUARE = 'square' - - -class BoxCoder(object): - """Abstract base class for box coder.""" - __metaclass__ = ABCMeta - - @abstractproperty - def code_size(self): - """Return the size of each code. - - This number is a constant and should agree with the output of the `encode` - op (e.g. if rel_codes is the output of self.encode(...), then it should have - shape [N, code_size()]). This abstractproperty should be overridden by - implementations. - - Returns: - an integer constant - """ - pass - - def encode(self, boxes, anchors): - """Encode a box list relative to an anchor collection. 
- - Args: - boxes: BoxList holding N boxes to be encoded - anchors: BoxList of N anchors - - Returns: - a tensor representing N relative-encoded boxes - """ - with tf.name_scope('Encode'): - return self._encode(boxes, anchors) - - def decode(self, rel_codes, anchors): - """Decode boxes that are encoded relative to an anchor collection. - - Args: - rel_codes: a tensor representing N relative-encoded boxes - anchors: BoxList of anchors - - Returns: - boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., - with corners y_min, x_min, y_max, x_max) - """ - with tf.name_scope('Decode'): - return self._decode(rel_codes, anchors) - - @abstractmethod - def _encode(self, boxes, anchors): - """Method to be overriden by implementations. - - Args: - boxes: BoxList holding N boxes to be encoded - anchors: BoxList of N anchors - - Returns: - a tensor representing N relative-encoded boxes - """ - pass - - @abstractmethod - def _decode(self, rel_codes, anchors): - """Method to be overriden by implementations. - - Args: - rel_codes: a tensor representing N relative-encoded boxes - anchors: BoxList of anchors - - Returns: - boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., - with corners y_min, x_min, y_max, x_max) - """ - pass - - -def batch_decode(encoded_boxes, box_coder, anchors): - """Decode a batch of encoded boxes. - - This op takes a batch of encoded bounding boxes and transforms - them to a batch of bounding boxes specified by their corners in - the order of [y_min, x_min, y_max, x_max]. - - Args: - encoded_boxes: a float32 tensor of shape [batch_size, num_anchors, - code_size] representing the location of the objects. - box_coder: a BoxCoder object. - anchors: a BoxList of anchors used to encode `encoded_boxes`. - - Returns: - decoded_boxes: a float32 tensor of shape [batch_size, num_anchors, - coder_size] representing the corners of the objects in the order - of [y_min, x_min, y_max, x_max]. - - Raises: - ValueError: if batch sizes of the inputs are inconsistent, or if - the number of anchors inferred from encoded_boxes and anchors are - inconsistent. - """ - encoded_boxes.get_shape().assert_has_rank(3) - if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static(): - raise ValueError('The number of anchors inferred from encoded_boxes' - ' and anchors are inconsistent: shape[1] of encoded_boxes' - ' %s should be equal to the number of anchors: %s.' % - (encoded_boxes.get_shape()[1].value, - anchors.num_boxes_static())) - - decoded_boxes = tf.stack([ - box_coder.decode(boxes, anchors).get() - for boxes in tf.unstack(encoded_boxes) - ]) - return decoded_boxes diff --git a/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/eval.py b/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/eval.py deleted file mode 100644 index ec68ad50bc25cd8528f4e9fd7976adad72782641..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/eval.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2017 The TensorFlow Authors All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Script to evaluate a trained Attention OCR model. - -A simple usage example: -python eval.py -""" -import tensorflow as tf -from tensorflow.contrib import slim -from tensorflow import app -from tensorflow.python.platform import flags - -import data_provider -import common_flags - -FLAGS = flags.FLAGS -common_flags.define() - -# yapf: disable -flags.DEFINE_integer('num_batches', 100, - 'Number of batches to run eval for.') - -flags.DEFINE_string('eval_log_dir', '/tmp/attention_ocr/eval', - 'Directory where the evaluation results are saved to.') - -flags.DEFINE_integer('eval_interval_secs', 60, - 'Frequency in seconds to run evaluations.') - -flags.DEFINE_integer('number_of_steps', None, - 'Number of times to run evaluation.') -# yapf: enable - - -def main(_): - if not tf.gfile.Exists(FLAGS.eval_log_dir): - tf.gfile.MakeDirs(FLAGS.eval_log_dir) - - dataset = common_flags.create_dataset(split_name=FLAGS.split_name) - model = common_flags.create_model(dataset.num_char_classes, - dataset.max_sequence_length, - dataset.num_of_views, dataset.null_code) - data = data_provider.get_data( - dataset, - FLAGS.batch_size, - augment=False, - central_crop_size=common_flags.get_crop_size()) - endpoints = model.create_base(data.images, labels_one_hot=None) - model.create_loss(data, endpoints) - eval_ops = model.create_summaries( - data, endpoints, dataset.charset, is_training=False) - slim.get_or_create_global_step() - session_config = tf.ConfigProto(device_count={"GPU": 0}) - slim.evaluation.evaluation_loop( - master=FLAGS.master, - checkpoint_dir=FLAGS.train_log_dir, - logdir=FLAGS.eval_log_dir, - eval_op=eval_ops, - num_evals=FLAGS.num_batches, - eval_interval_secs=FLAGS.eval_interval_secs, - max_number_of_evaluations=FLAGS.number_of_steps, - session_config=session_config) - - -if __name__ == '__main__': - app.run() diff --git a/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/preprocessing/__init__.py b/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/preprocessing/__init__.py deleted file mode 100644 index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000 --- a/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/preprocessing/__init__.py +++ /dev/null @@ -1 +0,0 @@ - diff --git a/spaces/NSect/VALL-E-X/utils/g2p/symbols.py b/spaces/NSect/VALL-E-X/utils/g2p/symbols.py deleted file mode 100644 index 789e9df25d3d93d1976ef22d15d77f51d170ed00..0000000000000000000000000000000000000000 --- a/spaces/NSect/VALL-E-X/utils/g2p/symbols.py +++ /dev/null @@ -1,76 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. 
-''' - -# japanese_cleaners -# _pad = '_' -# _punctuation = ',.!?-' -# _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' - - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -# # zh_ja_mixture_cleaners -# _pad = '_' -# _punctuation = ',.!?-~…' -# _letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' - - -'''# sanskrit_cleaners -_pad = '_' -_punctuation = '।' -_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' -''' - -'''# cjks_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' -''' - -'''# thai_cleaners -_pad = '_' -_punctuation = '.!? ' -_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' -''' - -# # cjke_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' - - -'''# shanghainese_cleaners -_pad = '_' -_punctuation = ',.!?…' -_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' -''' - -'''# chinese_dialect_cleaners -_pad = '_' -_punctuation = ',.!?~…─' -_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ ' -''' - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") diff --git a/spaces/NiuTaipu/moe-tts-test01/monotonic_align/core.py b/spaces/NiuTaipu/moe-tts-test01/monotonic_align/core.py deleted file mode 100644 index 1f940605fe4fd0738fa0006149fcba14ef88223a..0000000000000000000000000000000000000000 --- a/spaces/NiuTaipu/moe-tts-test01/monotonic_align/core.py +++ /dev/null @@ -1,36 +0,0 @@ -import numba - - -@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]), - nopython=True, nogil=True) -def maximum_path_jit(paths, values, t_ys, t_xs): - b = paths.shape[0] - max_neg_val = -1e9 - for i in range(int(b)): - path = paths[i] - value = values[i] - t_y = t_ys[i] - t_x = t_xs[i] - - v_prev = v_cur = 0.0 - index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0. 
- else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 diff --git a/spaces/Norod78/ComicsHero/app.py b/spaces/Norod78/ComicsHero/app.py deleted file mode 100644 index 134e29a5d187d7e669ae0ecd5748aad68bc2389e..0000000000000000000000000000000000000000 --- a/spaces/Norod78/ComicsHero/app.py +++ /dev/null @@ -1,89 +0,0 @@ -import os -os.system("pip install dlib") -import sys -import face_detection -import PIL -from PIL import Image, ImageOps -import numpy as np - -import torch -torch.set_grad_enabled(False) -net = torch.jit.load('ComicsHeroesReduced_v2E03_Traced_Script_CPU.pt') -net.eval() - - -def tensor2im(var): - var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy() - var = ((var + 1) / 2) - var[var < 0] = 0 - var[var > 1] = 1 - var = var * 255 - return Image.fromarray(var.astype('uint8')) - -def image_as_array(image_in): - im_array = np.array(image_in, np.float32) - im_array = (im_array/255)*2 - 1 - im_array = np.transpose(im_array, (2, 0, 1)) - im_array = np.expand_dims(im_array, 0) - return im_array - -def find_aligned_face(image_in, size=512): - aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size) - return aligned_image, n_faces, quad - -def align_first_face(image_in, size=512): - aligned_image, n_faces, quad = find_aligned_face(image_in,size=size) - if n_faces == 0: - image_in = image_in.resize((size, size)) - im_array = image_as_array(image_in) - else: - im_array = image_as_array(aligned_image) - - return im_array - -def img_concat_h(im1, im2): - dst = Image.new('RGB', (im1.width + im2.width, im1.height)) - dst.paste(im1, (0, 0)) - dst.paste(im2, (im1.width, 0)) - return dst - -import gradio as gr - -def face2hero( - img: Image.Image, - size: int -) -> Image.Image: - - aligned_img = align_first_face(img) - if aligned_img is None: - output=None - else: - input = torch.Tensor(aligned_img) - output = net(input) - output = tensor2im(output[0]) - output = img_concat_h(tensor2im(torch.Tensor(aligned_img)[0]), output) - - return output - -import os -import collections -from typing import Union, List -import numpy as np -from PIL import Image -import PIL.Image -import PIL.ImageFile -import numpy as np -import scipy.ndimage -import requests - -def inference(img): - out = face2hero(img, 512) - return out - - -title = "Comics hero" -description = "Turn a face into the face of a \"Comics hero\". Upload an image with a face, or click on one of the examples below. If a face could not be detected, an image will still be created." -article = "

    See the Github Repo
    samples: Sample00001 Sample00002 Sample00003 Sample00004 Sample00005
    The \"Comics Hero\" model was trained using Pix2PixHD by Doron Adler
    " - -examples=[['Example00001.jpg'],['Example00002.jpg'],['Example00003.jpg'],['Example00004.jpg'],['Example00005.jpg'], ['Example00006.jpg']] -gr.Interface(inference, gr.inputs.Image(type="pil",shape=(512,512)), gr.outputs.Image(type="pil"),title=title,description=description,article=article,examples=examples,enable_queue=True,allow_flagging=False).launch() diff --git a/spaces/OAOA/DifFace/basicsr/archs/ridnet_arch.py b/spaces/OAOA/DifFace/basicsr/archs/ridnet_arch.py deleted file mode 100644 index 85bb9ae0348e27dd6c797c03f8d9ec43f8b0b829..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/archs/ridnet_arch.py +++ /dev/null @@ -1,180 +0,0 @@ -import torch -import torch.nn as nn - -from basicsr.utils.registry import ARCH_REGISTRY -from .arch_util import ResidualBlockNoBN, make_layer - - -class MeanShift(nn.Conv2d): - """ Data normalization with mean and std. - - Args: - rgb_range (int): Maximum value of RGB. - rgb_mean (list[float]): Mean for RGB channels. - rgb_std (list[float]): Std for RGB channels. - sign (int): For subtraction, sign is -1, for addition, sign is 1. - Default: -1. - requires_grad (bool): Whether to update the self.weight and self.bias. - Default: True. - """ - - def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1, requires_grad=True): - super(MeanShift, self).__init__(3, 3, kernel_size=1) - std = torch.Tensor(rgb_std) - self.weight.data = torch.eye(3).view(3, 3, 1, 1) - self.weight.data.div_(std.view(3, 1, 1, 1)) - self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) - self.bias.data.div_(std) - self.requires_grad = requires_grad - - -class EResidualBlockNoBN(nn.Module): - """Enhanced Residual block without BN. - - There are three convolution layers in residual branch. - """ - - def __init__(self, in_channels, out_channels): - super(EResidualBlockNoBN, self).__init__() - - self.body = nn.Sequential( - nn.Conv2d(in_channels, out_channels, 3, 1, 1), - nn.ReLU(inplace=True), - nn.Conv2d(out_channels, out_channels, 3, 1, 1), - nn.ReLU(inplace=True), - nn.Conv2d(out_channels, out_channels, 1, 1, 0), - ) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - out = self.body(x) - out = self.relu(out + x) - return out - - -class MergeRun(nn.Module): - """ Merge-and-run unit. - - This unit contains two branches with different dilated convolutions, - followed by a convolution to process the concatenated features. - - Paper: Real Image Denoising with Feature Attention - Ref git repo: https://github.com/saeed-anwar/RIDNet - """ - - def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=1): - super(MergeRun, self).__init__() - - self.dilation1 = nn.Sequential( - nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding), nn.ReLU(inplace=True), - nn.Conv2d(out_channels, out_channels, kernel_size, stride, 2, 2), nn.ReLU(inplace=True)) - self.dilation2 = nn.Sequential( - nn.Conv2d(in_channels, out_channels, kernel_size, stride, 3, 3), nn.ReLU(inplace=True), - nn.Conv2d(out_channels, out_channels, kernel_size, stride, 4, 4), nn.ReLU(inplace=True)) - - self.aggregation = nn.Sequential( - nn.Conv2d(out_channels * 2, out_channels, kernel_size, stride, padding), nn.ReLU(inplace=True)) - - def forward(self, x): - dilation1 = self.dilation1(x) - dilation2 = self.dilation2(x) - out = torch.cat([dilation1, dilation2], dim=1) - out = self.aggregation(out) - out = out + x - return out - - -class ChannelAttention(nn.Module): - """Channel attention. - - Args: - num_feat (int): Channel number of intermediate features. 
- squeeze_factor (int): Channel squeeze factor. Default: - """ - - def __init__(self, mid_channels, squeeze_factor=16): - super(ChannelAttention, self).__init__() - self.attention = nn.Sequential( - nn.AdaptiveAvgPool2d(1), nn.Conv2d(mid_channels, mid_channels // squeeze_factor, 1, padding=0), - nn.ReLU(inplace=True), nn.Conv2d(mid_channels // squeeze_factor, mid_channels, 1, padding=0), nn.Sigmoid()) - - def forward(self, x): - y = self.attention(x) - return x * y - - -class EAM(nn.Module): - """Enhancement attention modules (EAM) in RIDNet. - - This module contains a merge-and-run unit, a residual block, - an enhanced residual block and a feature attention unit. - - Attributes: - merge: The merge-and-run unit. - block1: The residual block. - block2: The enhanced residual block. - ca: The feature/channel attention unit. - """ - - def __init__(self, in_channels, mid_channels, out_channels): - super(EAM, self).__init__() - - self.merge = MergeRun(in_channels, mid_channels) - self.block1 = ResidualBlockNoBN(mid_channels) - self.block2 = EResidualBlockNoBN(mid_channels, out_channels) - self.ca = ChannelAttention(out_channels) - # The residual block in the paper contains a relu after addition. - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - out = self.merge(x) - out = self.relu(self.block1(out)) - out = self.block2(out) - out = self.ca(out) - return out - - -@ARCH_REGISTRY.register() -class RIDNet(nn.Module): - """RIDNet: Real Image Denoising with Feature Attention. - - Ref git repo: https://github.com/saeed-anwar/RIDNet - - Args: - in_channels (int): Channel number of inputs. - mid_channels (int): Channel number of EAM modules. - Default: 64. - out_channels (int): Channel number of outputs. - num_block (int): Number of EAM. Default: 4. - img_range (float): Image range. Default: 255. - rgb_mean (tuple[float]): Image mean in RGB orders. - Default: (0.4488, 0.4371, 0.4040), calculated from DIV2K dataset. - """ - - def __init__(self, - in_channels, - mid_channels, - out_channels, - num_block=4, - img_range=255., - rgb_mean=(0.4488, 0.4371, 0.4040), - rgb_std=(1.0, 1.0, 1.0)): - super(RIDNet, self).__init__() - - self.sub_mean = MeanShift(img_range, rgb_mean, rgb_std) - self.add_mean = MeanShift(img_range, rgb_mean, rgb_std, 1) - - self.head = nn.Conv2d(in_channels, mid_channels, 3, 1, 1) - self.body = make_layer( - EAM, num_block, in_channels=mid_channels, mid_channels=mid_channels, out_channels=mid_channels) - self.tail = nn.Conv2d(mid_channels, out_channels, 3, 1, 1) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x): - res = self.sub_mean(x) - res = self.tail(self.body(self.relu(self.head(res)))) - res = self.add_mean(res) - - out = x + res - return out diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/criss/download_and_preprocess_tatoeba.sh b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/criss/download_and_preprocess_tatoeba.sh deleted file mode 100644 index 7ed64f017d5e62695ba73745c840507b994abc0f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/criss/download_and_preprocess_tatoeba.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -SPM_ENCODE=flores/scripts/spm_encode.py -DATA=data_tmp -SPM_MODEL=criss_checkpoints/sentence.bpe.model -DICT=criss_checkpoints/dict.txt - -if [[ -f flores ]]; then - echo "flores already cloned" -else - git clone https://github.com/facebookresearch/flores -fi -if [[ -f LASER ]]; then - echo "LASER already cloned" -else - git clone https://github.com/facebookresearch/LASER -fi -mkdir -p data_tmp -declare -A lang_tatoeba_map=( ["ar_AR"]="ara" ["de_DE"]="deu" ["es_XX"]="spa" ["et_EE"]="est" ["fi_FI"]="fin" ["fr_XX"]="fra" ["hi_IN"]="hin" ["it_IT"]="ita" ["ja_XX"]="jpn" ["ko_KR"]="kor" ["kk_KZ"]="kaz" ["nl_XX"]="nld" ["ru_RU"]="rus" ["tr_TR"]="tur" ["vi_VN"]="vie" ["zh_CN"]="cmn") -for lang in ar_AR de_DE es_XX et_EE fi_FI fr_XX hi_IN it_IT ja_XX kk_KZ ko_KR nl_XX ru_RU tr_TR vi_VN zh_CN; do - lang_tatoeba=${lang_tatoeba_map[$lang]} - echo $lang_tatoeba - datadir=$DATA/${lang}-en_XX-tatoeba - rm -rf $datadir - mkdir -p $datadir - TEST_PREFIX=LASER/data/tatoeba/v1/tatoeba - python $SPM_ENCODE \ - --model ${SPM_MODEL} \ - --output_format=piece \ - --inputs ${TEST_PREFIX}.${lang_tatoeba}-eng.${lang_tatoeba} ${TEST_PREFIX}.${lang_tatoeba}-eng.eng \ - --outputs $datadir/test.bpe.${lang}-en_XX.${lang} $datadir/test.bpe.${lang}-en_XX.en_XX - - # binarize data - fairseq-preprocess \ - --source-lang ${lang} --target-lang en_XX \ - --testpref $datadir/test.bpe.${lang}-en_XX \ - --destdir $datadir \ - --srcdict ${DICT} \ - --joined-dictionary \ - --workers 4 -done diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/wav2vec/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/fused_lamb.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/fused_lamb.py deleted file mode 100644 index f4f2bdb0c6c65f7758509b6d4d2f2c48cb6e8b4f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/fairseq/optim/fused_lamb.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from fairseq.optim import LegacyFairseqOptimizer, register_optimizer - - -@register_optimizer("lamb") -class FairseqLAMB(LegacyFairseqOptimizer): - """LAMB optimizer.""" - - def __init__(self, args, params): - super().__init__(args) - try: - from apex.optimizers import FusedLAMB - - self._optimizer = FusedLAMB(params, **self.optimizer_config) - except ImportError: - raise ImportError("Please install apex to use LAMB optimizer") - - @staticmethod - def add_args(parser): - """Add optimizer-specific arguments to the parser.""" - # fmt: off - parser.add_argument('--lamb-betas', default='(0.9, 0.999)', metavar='B', - help='betas for LAMB optimizer') - parser.add_argument('--lamb-eps', type=float, default=1e-8, metavar='D', - help='epsilon for LAMB optimizer') - parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', - help='weight decay') - # fmt: on - - @property - def optimizer_config(self): - """ - Return a kwarg dictionary that will be used to override optimizer - args stored in checkpoints. This allows us to load a checkpoint and - resume training using a different set of optimizer args, e.g., with a - different learning rate. 
- """ - return { - "lr": self.args.lr[0], - "betas": eval(self.args.lamb_betas), - "eps": self.args.lamb_eps, - "weight_decay": self.args.weight_decay, - } - - @property - def supports_flat_params(self): - return False diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py deleted file mode 100644 index 062bb82f669f63a537b6ee8df4d42d292eb2575e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/metrics/asr_metrics/self_auto_bleu.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import nltk -from misc.bleu_utils import sentence_bleu -import warnings - - -def get_target_sequences(manifest, ground_truth, to_take=1000): - import json - import pathlib - - with open(ground_truth, 'r') as fin: - original_continuations = json.loads(fin.read()) - - sequence2length = [(k, v[0]) for k, v in original_continuations.items()] - assert all(float(v) >= 6.0 for (_, v) in sequence2length) # 6 seconds - - sequence2length.sort(key=lambda x: x[1]) - to_take_sequences = set(v[0] for v in sequence2length[:to_take]) - to_take_ids = [] - - with open(manifest, 'r') as f: - f.readline() - - for i, line in enumerate(f.readlines()): - seq_id = line.split()[0] - seq_id = pathlib.Path(seq_id).name.split('__')[0] - - if seq_id in to_take_sequences: - to_take_ids.append(i) - - print(f'Took {len(to_take_ids)} ids') - return set(to_take_ids) - - -def get_args(): - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument('--asr-transcript', type=str, - help='Path to the transcript file.') - - parser.add_argument('--manifest', required=True) - parser.add_argument('--prompts-description', required=True) - - parser.add_argument('--cut-id', action='store_true', - help='Whether cut the first token (typically a seq id)') - parser.add_argument('--cut-tail', action='store_true', - help='Whether cut the last token (typically a speaker id)') - parser.add_argument('--debug', action='store_true') - - args = parser.parse_args() - - return args - - -def get_self_bleu(utterances, averaging_mode, weights): - self_bleu = [] - - for i in range(len(utterances)): - hypo = utterances[i] - rest = utterances[:i] + utterances[i+1:] - - self_bleu.append(sentence_bleu(rest, hypo, weights, - no_length_penalty=True, averaging_mode=averaging_mode)) - - return self_bleu - - -def get_self_bleu2_arithmetic(utterances): - weights = (0.5, 0.5) # equal weight for unigrams and bigrams - return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights) - - -def get_self_bleu2_geometric(utterances): - weights = (0.5, 0.5) - return get_self_bleu(utterances, averaging_mode='geometric', weights=weights) - - -def get_auto_bleu2_arithmetic(utterances): - weights = (0.5, 0.5) - return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances] - - -def get_auto_bleu2_geometric(utterances): - weights = (0.5, 0.5) - return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances] - - -def get_auto_bleu3_geometric(utterances): - weights = (1./3, 1./3, 1./3) - return [auto_bleu(u, mean_mode='geometric', weights=weights) for u in utterances] - - -def get_auto_bleu3_arithmetic(utterances): - weights = (1./3, 1./3, 1./3) - 
return [auto_bleu(u, mean_mode='arithmetic', weights=weights) for u in utterances] - - -def get_self_bleu3_arithmetic(utterances): - weights = (1./3, 1./3, 1./3) - return get_self_bleu(utterances, averaging_mode='arithmetic', weights=weights) - - -def get_self_bleu3_geometric(utterances): - weights = (1./3, 1./3, 1./3) - return get_self_bleu(utterances, averaging_mode='geometric', weights=weights) - - -def auto_bleu(sentence, weights, mean_mode='arithmetic'): - if len(sentence) <= 1: - return 0 - - N = len(weights) - - bleu_n = np.zeros([N]) - for n in range(N): - targ_ngrams = list(nltk.ngrams(sentence, n+1)) - for p in range(len(targ_ngrams)): - left = sentence[:p] - right = sentence[(p+n+1):] - rest_ngrams = list(nltk.ngrams(left, n+1)) + \ - list(nltk.ngrams(right, n+1)) - # compute the nb of matching ngrams - bleu_n[n] += targ_ngrams[p] in rest_ngrams - bleu_n[n] /= len(targ_ngrams) # average them to get a proportion - - weights = np.array(weights) - if mean_mode == 'arithmetic': - return (bleu_n * weights).sum() - elif mean_mode == 'geometric': - return (bleu_n ** weights).prod() - else: - raise ValueError(f'Unknown agggregation mode {mean_mode}') - - -def main(): - from multiprocessing import Pool - - args = get_args() - target_ids = get_target_sequences(args.manifest, args.prompts_description) - - with open(args.asr_transcript, 'r') as fin: - lines = fin.readlines() - - terms = [x.strip().split() for x in lines] - filtered = [] - for term in terms: - line_id = int(term[-1].split('-')[1][:-1]) - if line_id in target_ids: - filtered.append(term) - terms = filtered - - if args.cut_id: - terms = [x[1:] for x in terms] - if args.cut_tail: - terms = [x[:-1] for x in terms] - - if args.debug: - terms = terms[:10] - - tasks = [ - ('Self-BLEU2-arithmetic', get_self_bleu2_arithmetic), - ('Self-BLEU2-geometric', get_self_bleu2_geometric), - ('Auto-BLEU2-arithmetic', get_auto_bleu2_arithmetic), - ('Auto-BLEU2-geometric', get_auto_bleu2_geometric), - - ('Self-BLEU3-arithmetic', get_self_bleu3_arithmetic), - ('Self-BLEU3-geometric', get_self_bleu3_geometric), - ('Auto-BLEU3-arithmetic', get_auto_bleu3_arithmetic), - ('Auto-BLEU3-geometric', get_auto_bleu3_geometric), - ] - - n_processes = min(16, len(tasks)) - with Pool(n_processes) as pool: - metrics = pool.map(run_f, [(t[1], terms) for t in tasks]) - - for (metric_name, _), metric in zip(tasks, metrics): - metric, sem = np.mean(metric), np.std(metric) / np.sqrt(len(metric)) - - metric, sem = [ - round(100 * x, 2) for x in [metric, sem] - ] - - print(f'{metric_name} {metric} +- {sem}') - - -def run_f(task_params): - f, terms = task_params - return f(terms) - - -if __name__ == '__main__': - # NLTK produces warnings - warnings.filterwarnings("ignore") - - main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/__init__.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/__init__.py deleted file mode 100644 index 337c77ac7bfb7e11a0662b86b98c4c0a02da26df..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/__init__.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-"""isort:skip_file""" - -import argparse -import importlib -import os -from contextlib import ExitStack - -from fairseq.dataclass import FairseqDataclass -from fairseq.dataclass.utils import merge_with_parent -from hydra.core.config_store import ConfigStore -from omegaconf import open_dict, OmegaConf - -from .composite_encoder import CompositeEncoder -from .distributed_fairseq_model import DistributedFairseqModel -from .fairseq_decoder import FairseqDecoder -from .fairseq_encoder import FairseqEncoder -from .fairseq_incremental_decoder import FairseqIncrementalDecoder -from .fairseq_model import ( - BaseFairseqModel, - FairseqEncoderDecoderModel, - FairseqEncoderModel, - FairseqLanguageModel, - FairseqModel, - FairseqMultiModel, -) - - -MODEL_REGISTRY = {} -MODEL_DATACLASS_REGISTRY = {} -ARCH_MODEL_REGISTRY = {} -ARCH_MODEL_NAME_REGISTRY = {} -ARCH_MODEL_INV_REGISTRY = {} -ARCH_CONFIG_REGISTRY = {} - - -__all__ = [ - "BaseFairseqModel", - "CompositeEncoder", - "DistributedFairseqModel", - "FairseqDecoder", - "FairseqEncoder", - "FairseqEncoderDecoderModel", - "FairseqEncoderModel", - "FairseqIncrementalDecoder", - "FairseqLanguageModel", - "FairseqModel", - "FairseqMultiModel", -] - - -def build_model(cfg: FairseqDataclass, task): - - model = None - model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None) - - if not model_type and len(cfg) == 1: - # this is hit if config object is nested in directory that is named after model type - - model_type = next(iter(cfg)) - if model_type in MODEL_DATACLASS_REGISTRY: - cfg = cfg[model_type] - else: - raise Exception( - "Could not infer model type from directory. Please add _name field to indicate model type. " - "Available models: " - + str(MODEL_DATACLASS_REGISTRY.keys()) - + " Requested model type: " - + model_type - ) - - if model_type in ARCH_MODEL_REGISTRY: - # case 1: legacy models - model = ARCH_MODEL_REGISTRY[model_type] - elif model_type in MODEL_DATACLASS_REGISTRY: - # case 2: config-driven models - model = MODEL_REGISTRY[model_type] - - if model_type in MODEL_DATACLASS_REGISTRY: - # set defaults from dataclass. note that arch name and model name can be the same - dc = MODEL_DATACLASS_REGISTRY[model_type] - - if isinstance(cfg, argparse.Namespace): - cfg = dc.from_namespace(cfg) - else: - cfg = merge_with_parent(dc(), cfg) - else: - if model_type in ARCH_CONFIG_REGISTRY: - with open_dict(cfg) if OmegaConf.is_config(cfg) else ExitStack(): - # this calls the different "arch" functions (like base_architecture()) that you indicate - # if you specify --arch on the command line. this is only applicable to the old argparse based models - # hydra models should expose different architectures via different config files - # it will modify the cfg object and default parameters according to the arch - ARCH_CONFIG_REGISTRY[model_type](cfg) - - assert model is not None, ( - f"Could not infer model type from {cfg}. " - "Available models: {}".format( - MODEL_DATACLASS_REGISTRY.keys() - ) - + f" Requested model type: {model_type}" - ) - - return model.build_model(cfg, task) - - -def register_model(name, dataclass=None): - """ - New model types can be added to fairseq with the :func:`register_model` - function decorator. - - For example:: - - @register_model('lstm') - class LSTM(FairseqEncoderDecoderModel): - (...) - - .. note:: All models must implement the :class:`BaseFairseqModel` interface. 
- Typically you will extend :class:`FairseqEncoderDecoderModel` for - sequence-to-sequence tasks or :class:`FairseqLanguageModel` for - language modeling tasks. - - Args: - name (str): the name of the model - """ - - def register_model_cls(cls): - if name in MODEL_REGISTRY: - raise ValueError("Cannot register duplicate model ({})".format(name)) - if not issubclass(cls, BaseFairseqModel): - raise ValueError( - "Model ({}: {}) must extend BaseFairseqModel".format(name, cls.__name__) - ) - MODEL_REGISTRY[name] = cls - if dataclass is not None and not issubclass(dataclass, FairseqDataclass): - raise ValueError( - "Dataclass {} must extend FairseqDataclass".format(dataclass) - ) - - cls.__dataclass = dataclass - if dataclass is not None: - MODEL_DATACLASS_REGISTRY[name] = dataclass - - cs = ConfigStore.instance() - node = dataclass() - node._name = name - cs.store(name=name, group="model", node=node, provider="fairseq") - - @register_model_architecture(name, name) - def noop(_): - pass - - return cls - - return register_model_cls - - -def register_model_architecture(model_name, arch_name): - """ - New model architectures can be added to fairseq with the - :func:`register_model_architecture` function decorator. After registration, - model architectures can be selected with the ``--arch`` command-line - argument. - - For example:: - - @register_model_architecture('lstm', 'lstm_luong_wmt_en_de') - def lstm_luong_wmt_en_de(cfg): - args.encoder_embed_dim = getattr(cfg.model, 'encoder_embed_dim', 1000) - (...) - - The decorated function should take a single argument *cfg*, which is a - :class:`omegaconf.DictConfig`. The decorated function should modify these - arguments in-place to match the desired architecture. - - Args: - model_name (str): the name of the Model (Model must already be - registered) - arch_name (str): the name of the model architecture (``--arch``) - """ - - def register_model_arch_fn(fn): - if model_name not in MODEL_REGISTRY: - raise ValueError( - "Cannot register model architecture for unknown model type ({})".format( - model_name - ) - ) - if arch_name in ARCH_MODEL_REGISTRY: - raise ValueError( - "Cannot register duplicate model architecture ({})".format(arch_name) - ) - if not callable(fn): - raise ValueError( - "Model architecture must be callable ({})".format(arch_name) - ) - ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name] - ARCH_MODEL_NAME_REGISTRY[arch_name] = model_name - ARCH_MODEL_INV_REGISTRY.setdefault(model_name, []).append(arch_name) - ARCH_CONFIG_REGISTRY[arch_name] = fn - return fn - - return register_model_arch_fn - - -def import_models(models_dir, namespace): - for file in os.listdir(models_dir): - path = os.path.join(models_dir, file) - if ( - not file.startswith("_") - and not file.startswith(".") - and (file.endswith(".py") or os.path.isdir(path)) - ): - model_name = file[: file.find(".py")] if file.endswith(".py") else file - importlib.import_module(namespace + "." 
+ model_name) - - # extra `model_parser` for sphinx - if model_name in MODEL_REGISTRY: - parser = argparse.ArgumentParser(add_help=False) - group_archs = parser.add_argument_group("Named architectures") - group_archs.add_argument( - "--arch", choices=ARCH_MODEL_INV_REGISTRY[model_name] - ) - group_args = parser.add_argument_group( - "Additional command-line arguments" - ) - MODEL_REGISTRY[model_name].add_args(group_args) - globals()[model_name + "_parser"] = parser - - -# automatically import any Python files in the models/ directory -models_dir = os.path.dirname(__file__) -import_models(models_dir, "fairseq.models") diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/roberta/alignment_utils.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/roberta/alignment_utils.py deleted file mode 100644 index ccc7f74cb94d5b8baa2d4e9dfd44f653d47ee43e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/models/roberta/alignment_utils.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from collections import Counter -from typing import List - -import torch - - -def align_bpe_to_words(roberta, bpe_tokens: torch.LongTensor, other_tokens: List[str]): - """ - Helper to align GPT-2 BPE to other tokenization formats (e.g., spaCy). - - Args: - roberta (RobertaHubInterface): RoBERTa instance - bpe_tokens (torch.LongTensor): GPT-2 BPE tokens of shape `(T_bpe)` - other_tokens (List[str]): other tokens of shape `(T_words)` - - Returns: - List[str]: mapping from *other_tokens* to corresponding *bpe_tokens*. - """ - assert bpe_tokens.dim() == 1 - assert bpe_tokens[0] == 0 - - def clean(text): - return text.strip() - - # remove whitespaces to simplify alignment - bpe_tokens = [roberta.task.source_dictionary.string([x]) for x in bpe_tokens] - bpe_tokens = [ - clean(roberta.bpe.decode(x) if x not in {"", ""} else x) for x in bpe_tokens - ] - other_tokens = [clean(str(o)) for o in other_tokens] - - # strip leading - bpe_tokens = bpe_tokens[1:] - assert "".join(bpe_tokens) == "".join(other_tokens) - - # create alignment from every word to a list of BPE tokens - alignment = [] - bpe_toks = filter(lambda item: item[1] != "", enumerate(bpe_tokens, start=1)) - j, bpe_tok = next(bpe_toks) - for other_tok in other_tokens: - bpe_indices = [] - while True: - if other_tok.startswith(bpe_tok): - bpe_indices.append(j) - other_tok = other_tok[len(bpe_tok) :] - try: - j, bpe_tok = next(bpe_toks) - except StopIteration: - j, bpe_tok = None, None - elif bpe_tok.startswith(other_tok): - # other_tok spans multiple BPE tokens - bpe_indices.append(j) - bpe_tok = bpe_tok[len(other_tok) :] - other_tok = "" - else: - raise Exception('Cannot align "{}" and "{}"'.format(other_tok, bpe_tok)) - if other_tok == "": - break - assert len(bpe_indices) > 0 - alignment.append(bpe_indices) - assert len(alignment) == len(other_tokens) - - return alignment - - -def align_features_to_words(roberta, features, alignment): - """ - Align given features to words. - - Args: - roberta (RobertaHubInterface): RoBERTa instance - features (torch.Tensor): features to align of shape `(T_bpe x C)` - alignment: alignment between BPE tokens and words returned by - func:`align_bpe_to_words`. 
- """ - assert features.dim() == 2 - - bpe_counts = Counter(j for bpe_indices in alignment for j in bpe_indices) - assert bpe_counts[0] == 0 # shouldn't be aligned - denom = features.new([bpe_counts.get(j, 1) for j in range(len(features))]) - weighted_features = features / denom.unsqueeze(-1) - - output = [weighted_features[0]] - largest_j = -1 - for bpe_indices in alignment: - output.append(weighted_features[bpe_indices].sum(dim=0)) - largest_j = max(largest_j, *bpe_indices) - for j in range(largest_j + 1, len(features)): - output.append(weighted_features[j]) - output = torch.stack(output) - assert torch.all(torch.abs(output.sum(dim=0) - features.sum(dim=0)) < 1e-4) - return output - - -def spacy_nlp(): - if getattr(spacy_nlp, "_nlp", None) is None: - try: - from spacy.lang.en import English - - spacy_nlp._nlp = English() - except ImportError: - raise ImportError("Please install spacy with: pip install spacy") - return spacy_nlp._nlp - - -def spacy_tokenizer(): - if getattr(spacy_tokenizer, "_tokenizer", None) is None: - try: - nlp = spacy_nlp() - spacy_tokenizer._tokenizer = nlp.Defaults.create_tokenizer(nlp) - except ImportError: - raise ImportError("Please install spacy with: pip install spacy") - return spacy_tokenizer._tokenizer diff --git a/spaces/OIUGLK/bingo/src/app/loading.css b/spaces/OIUGLK/bingo/src/app/loading.css deleted file mode 100644 index eaaab6a86a228334c4eca3c5368ae6f0f593d405..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/src/app/loading.css +++ /dev/null @@ -1,68 +0,0 @@ -::-webkit-scrollbar { - width: 10px; - height: 10px; - display: none; -} - -::-webkit-scrollbar-button:start:decrement, -::-webkit-scrollbar-button:end:increment { - height: 30px; - background-color: transparent; -} - -::-webkit-scrollbar-track-piece { - background-color: #3b3b3b; - -webkit-border-radius: 16px; -} - -::-webkit-scrollbar-thumb:vertical { - height: 50px; - background-color: #666; - border: 1px solid #eee; - -webkit-border-radius: 6px; -} - -/* loading start */ -.loading-spinner { - display: flex; - justify-content: center; - align-items: center; - height: 100vh; - opacity: 1; - transition: opacity .8s ease-out; -} - -.loading-spinner.hidden { - opacity: 0; -} - -.loading-spinner>div { - width: 30px; - height: 30px; - background: linear-gradient(90deg, #2870EA 10.79%, #1B4AEF 87.08%); - - border-radius: 100%; - display: inline-block; - animation: sk-bouncedelay 1.4s infinite ease-in-out both; -} - -.loading-spinner .bounce1 { - animation-delay: -0.32s; -} - -.loading-spinner .bounce2 { - animation-delay: -0.16s; -} - -@keyframes sk-bouncedelay { - - 0%, - 80%, - 100% { - transform: scale(0); - } - - 40% { - transform: scale(1.0); - } -} diff --git a/spaces/OIUGLK/bingo/src/components/markdown.tsx b/spaces/OIUGLK/bingo/src/components/markdown.tsx deleted file mode 100644 index d4491467a1f14d1d72e535caac9c40636054e5df..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/src/components/markdown.tsx +++ /dev/null @@ -1,9 +0,0 @@ -import { FC, memo } from 'react' -import ReactMarkdown, { Options } from 'react-markdown' - -export const MemoizedReactMarkdown: FC = memo( - ReactMarkdown, - (prevProps, nextProps) => - prevProps.children === nextProps.children && - prevProps.className === nextProps.className -) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/models/fcos.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/models/fcos.py deleted file mode 100644 index 
1c752029b7fc64ec375a55182e5342c9eb48bb33..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/configs/common/models/fcos.py +++ /dev/null @@ -1,23 +0,0 @@ -from detectron2.modeling.meta_arch.fcos import FCOS, FCOSHead - -from .retinanet import model - -model._target_ = FCOS - -del model.anchor_generator -del model.box2box_transform -del model.anchor_matcher -del model.input_format - -# Use P5 instead of C5 to compute P6/P7 -# (Sec 2.2 of https://arxiv.org/abs/2006.09214) -model.backbone.top_block.in_feature = "p5" -model.backbone.top_block.in_channels = 256 - -# New score threshold determined based on sqrt(cls_score * centerness) -model.test_score_thresh = 0.2 -model.test_nms_thresh = 0.6 - -model.head._target_ = FCOSHead -del model.head.num_anchors -model.head.norm = "GN" diff --git a/spaces/OpenMotionLab/MotionGPT/pyrender/docs/make.bat b/spaces/OpenMotionLab/MotionGPT/pyrender/docs/make.bat deleted file mode 100644 index 4d9eb83d9f9309029f4b14ff09024658bb0f5563..0000000000000000000000000000000000000000 --- a/spaces/OpenMotionLab/MotionGPT/pyrender/docs/make.bat +++ /dev/null @@ -1,35 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% - -:end -popd diff --git a/spaces/PKUWilliamYang/StyleGANEX/README.md b/spaces/PKUWilliamYang/StyleGANEX/README.md deleted file mode 100644 index afb242d1684ce4c8115effececd87172500e2a86..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/StyleGANEX/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: StyleGANEX -sdk: gradio -emoji: 🐨 -colorFrom: pink -colorTo: yellow -sdk_version: 3.4 -app_file: app.py -pinned: false ---- \ No newline at end of file diff --git a/spaces/PKaushik/Human-Part-Segmentation/app.py b/spaces/PKaushik/Human-Part-Segmentation/app.py deleted file mode 100644 index b3c6c25f6286fe66a8a58dfba92ef5d4198f87b7..0000000000000000000000000000000000000000 --- a/spaces/PKaushik/Human-Part-Segmentation/app.py +++ /dev/null @@ -1,63 +0,0 @@ -import numpy as np -import tensorflow as tf -import gradio as gr -from huggingface_hub import from_pretrained_keras -import cv2 - -model = from_pretrained_keras("keras-io/deeplabv3p-resnet50") - -colormap = np.array([[0,0,0], [31,119,180], [44,160,44], [44, 127, 125], [52, 225, 143], - [217, 222, 163], [254, 128, 37], [130, 162, 128], [121, 7, 166], [136, 183, 248], - [85, 1, 76], [22, 23, 62], [159, 50, 15], [101, 93, 152], [252, 229, 92], - [167, 173, 17], [218, 252, 252], [238, 126, 197], [116, 157, 140], [214, 220, 252]], dtype=np.uint8) - -img_size = 512 - -def read_image(image): - image = tf.convert_to_tensor(image) - image.set_shape([None, None, 3]) - image = tf.image.resize(images=image, size=[img_size, img_size]) - image = image / 127.5 - 1 - return image - -def infer(model, image_tensor): - 
predictions = model.predict(np.expand_dims((image_tensor), axis=0)) - predictions = np.squeeze(predictions) - predictions = np.argmax(predictions, axis=2) - return predictions - -def decode_segmentation_masks(mask, colormap, n_classes): - r = np.zeros_like(mask).astype(np.uint8) - g = np.zeros_like(mask).astype(np.uint8) - b = np.zeros_like(mask).astype(np.uint8) - for l in range(0, n_classes): - idx = mask == l - r[idx] = colormap[l, 0] - g[idx] = colormap[l, 1] - b[idx] = colormap[l, 2] - rgb = np.stack([r, g, b], axis=2) - return rgb - -def get_overlay(image, colored_mask): - image = tf.keras.preprocessing.image.array_to_img(image) - image = np.array(image).astype(np.uint8) - overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0) - return overlay - -def segmentation(input_image): - image_tensor = read_image(input_image) - prediction_mask = infer(image_tensor=image_tensor, model=model) - prediction_colormap = decode_segmentation_masks(prediction_mask, colormap, 20) - overlay = get_overlay(image_tensor, prediction_colormap) - return (overlay, prediction_colormap) - -i = gr.inputs.Image() -o = [gr.outputs.Image('pil'), gr.outputs.Image('pil')] - -examples = [["example_image_2.jpeg"], ["example_image_2.jpg"], ["example_image_3.jpeg"]] -title = "Human Part Segmentation" -description = "Upload an image or select from examples to segment out different human parts." - -article = "" -gr.Interface(segmentation, i, o, examples=examples, allow_flagging=False, analytics_enabled=False, - title=title, description=description, article=article).launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/compile-tree-il.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/compile-tree-il.go deleted file mode 100644 index d0b55ebffcf268a12003962017f7db85d34e4e14..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/ecmascript/compile-tree-il.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-88.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-88.go deleted file mode 100644 index 47e42403bb95ce250901f303ec7efba7c8f1575f..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/srfi/srfi-88.go and /dev/null differ diff --git a/spaces/PhucBui/demo/bear_classifier.py b/spaces/PhucBui/demo/bear_classifier.py deleted file mode 100644 index f60cbc7404ef7030d0836b2bb372ec105047ac77..0000000000000000000000000000000000000000 --- a/spaces/PhucBui/demo/bear_classifier.py +++ /dev/null @@ -1,23 +0,0 @@ -# import fastai -# import fastbook -# fastbook.setup_book() - -from fastai.vision.all import * -import torch -print(torch.__version__) -# import PIL.Image -# if not hasattr(PIL.Image, 'Resampling'): # Pillow<9.0 -# PIL.Image.Resampling = PIL.Image - -# Now PIL.Image.Resampling.BICUBIC is always recognized. 
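The commented-out lines just above reference a Pillow compatibility shim. As a hedged illustration only (this is not code from the original file), an active version of that shim would look like the following:

import PIL.Image

# Pillow >= 9.0 moved the resampling filters to PIL.Image.Resampling;
# older releases expose BICUBIC and friends directly on PIL.Image.
# Aliasing the module lets PIL.Image.Resampling.BICUBIC resolve either way.
if not hasattr(PIL.Image, "Resampling"):  # Pillow < 9.0
    PIL.Image.Resampling = PIL.Image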
- - -if __name__ == '__main__': - learner = load_learner('export.pkl', cpu=True) - path = Path('images/mybear.jpeg') - # Prediction - pred,pred_idx,probs = learner.predict(path) - # Print prediction - print(pred) - print(probs) - diff --git a/spaces/Politrees/RVC_V2_Huggingface_Version/lib/infer_pack/models_onnx.py b/spaces/Politrees/RVC_V2_Huggingface_Version/lib/infer_pack/models_onnx.py deleted file mode 100644 index 963e67b29f828e9fdd096397952054fe77cf3d10..0000000000000000000000000000000000000000 --- a/spaces/Politrees/RVC_V2_Huggingface_Version/lib/infer_pack/models_onnx.py +++ /dev/null @@ -1,819 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - 
x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, 
bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - 
).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp 
= np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMsNSFsidM(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - version, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - if version == "v1": - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - else: - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - self.speaker_map = None - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def construct_spkmixmap(self, n_speaker): - self.speaker_map = torch.zeros((n_speaker, 1, 1, self.gin_channels)) - for i in range(n_speaker): - self.speaker_map[i] = self.emb_g(torch.LongTensor([[i]])) - self.speaker_map = self.speaker_map.unsqueeze(0) - - def forward(self, phone, phone_lengths, pitch, nsff0, g, rnd, max_len=None): - if self.speaker_map is not None: # [N, 
S] * [S, B, 1, H] - g = g.reshape((g.shape[0], g.shape[1], 1, 1, 1)) # [N, S, B, 1, 1] - g = g * self.speaker_map # [N, S, B, 1, H] - g = torch.sum(g, dim=1) # [N, 1, B, 1, H] - g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N] - else: - g = g.unsqueeze(0) - g = self.emb_g(g).transpose(1, 2) - - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - 
padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/__init__.py b/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/__init__.py deleted file mode 100644 index f9e5ff59950ee0b1d1a67c9b3831d67d08048148..0000000000000000000000000000000000000000 --- a/spaces/Prof-Reza/Audiocraft_Music-Audio_Generation/audiocraft/adversarial/discriminators/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -# flake8: noqa -from .mpd import MultiPeriodDiscriminator -from .msd import MultiScaleDiscriminator -from .msstftd import MultiScaleSTFTDiscriminator diff --git a/spaces/Ramse/TTS_Hindi/modules/hifigan/model/.ipynb_checkpoints/resstack-checkpoint.py b/spaces/Ramse/TTS_Hindi/modules/hifigan/model/.ipynb_checkpoints/resstack-checkpoint.py deleted file mode 100644 index 29cb7729f98911042415e20b6b4323edf88ed929..0000000000000000000000000000000000000000 --- a/spaces/Ramse/TTS_Hindi/modules/hifigan/model/.ipynb_checkpoints/resstack-checkpoint.py +++ /dev/null @@ -1,36 +0,0 @@ -import torch -from torch import nn -from ..utils.utils import weights_init - -class ResStack(nn.Module): - def __init__(self, kernel, channel, padding, dilations = [1, 3, 5]): - super(ResStack, self).__init__() - resstack = [] - for dilation in dilations: - resstack += [ - nn.LeakyReLU(0.2), - nn.ReflectionPad1d(dilation), - nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=kernel, dilation=dilation)), - nn.LeakyReLU(0.2), - nn.ReflectionPad1d(padding), - nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=1)), - ] - self.resstack = nn.Sequential(*resstack) - - self.shortcut = nn.utils.weight_norm(nn.Conv1d(channel, channel, kernel_size=1)) - - def forward(self, x): - x1 = self.shortcut(x) - x2 = self.resstack(x) - return x1 + x2 - - def remove_weight_norm(self): - # nn.utils.remove_weight_norm(self.resstack[2]) - # nn.utils.remove_weight_norm(self.resstack[4]) - for idx, layer in enumerate(self.resstack): - if len(layer.state_dict()) != 0: - try: - nn.utils.remove_weight_norm(layer) - except: - layer.remove_weight_norm() - nn.utils.remove_weight_norm(self.shortcut) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/distlib/locators.py 
b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/distlib/locators.py deleted file mode 100644 index 966ebc0e37d6104a8e0e1fefe9dc526f39409ce2..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/distlib/locators.py +++ /dev/null @@ -1,1300 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2015 Vinay Sajip. -# Licensed to the Python Software Foundation under a contributor agreement. -# See LICENSE.txt and CONTRIBUTORS.txt. -# - -import gzip -from io import BytesIO -import json -import logging -import os -import posixpath -import re -try: - import threading -except ImportError: # pragma: no cover - import dummy_threading as threading -import zlib - -from . import DistlibException -from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, - queue, quote, unescape, build_opener, - HTTPRedirectHandler as BaseRedirectHandler, text_type, - Request, HTTPError, URLError) -from .database import Distribution, DistributionPath, make_dist -from .metadata import Metadata, MetadataInvalidError -from .util import (cached_property, ensure_slash, split_filename, get_project_data, - parse_requirement, parse_name_and_version, ServerProxy, - normalize_name) -from .version import get_scheme, UnsupportedVersionError -from .wheel import Wheel, is_compatible - -logger = logging.getLogger(__name__) - -HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)') -CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) -HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') -DEFAULT_INDEX = 'https://pypi.org/pypi' - -def get_all_distribution_names(url=None): - """ - Return all distribution names known by an index. - :param url: The URL of the index. - :return: A list of all known distribution names. - """ - if url is None: - url = DEFAULT_INDEX - client = ServerProxy(url, timeout=3.0) - try: - return client.list_packages() - finally: - client('close')() - -class RedirectHandler(BaseRedirectHandler): - """ - A class to work around a bug in some Python 3.2.x releases. - """ - # There's a bug in the base version for some 3.2.x - # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header - # returns e.g. /abc, it bails because it says the scheme '' - # is bogus, when actually it should use the request's - # URL for the scheme. See Python issue #13696. - def http_error_302(self, req, fp, code, msg, headers): - # Some servers (incorrectly) return multiple Location headers - # (so probably same goes for URI). Use first header. - newurl = None - for key in ('location', 'uri'): - if key in headers: - newurl = headers[key] - break - if newurl is None: # pragma: no cover - return - urlparts = urlparse(newurl) - if urlparts.scheme == '': - newurl = urljoin(req.get_full_url(), newurl) - if hasattr(headers, 'replace_header'): - headers.replace_header(key, newurl) - else: - headers[key] = newurl - return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, - headers) - - http_error_301 = http_error_303 = http_error_307 = http_error_302 - -class Locator(object): - """ - A base class for locators - things that locate distributions. - """ - source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') - binary_extensions = ('.egg', '.exe', '.whl') - excluded_extensions = ('.pdf',) - - # A list of tags indicating which wheels you want to match. The default - # value of None matches against the tags compatible with the running - # Python. 
If you want to match other values, set wheel_tags on a locator - # instance to a list of tuples (pyver, abi, arch) which you want to match. - wheel_tags = None - - downloadable_extensions = source_extensions + ('.whl',) - - def __init__(self, scheme='default'): - """ - Initialise an instance. - :param scheme: Because locators look for most recent versions, they - need to know the version scheme to use. This specifies - the current PEP-recommended scheme - use ``'legacy'`` - if you need to support existing distributions on PyPI. - """ - self._cache = {} - self.scheme = scheme - # Because of bugs in some of the handlers on some of the platforms, - # we use our own opener rather than just using urlopen. - self.opener = build_opener(RedirectHandler()) - # If get_project() is called from locate(), the matcher instance - # is set from the requirement passed to locate(). See issue #18 for - # why this can be useful to know. - self.matcher = None - self.errors = queue.Queue() - - def get_errors(self): - """ - Return any errors which have occurred. - """ - result = [] - while not self.errors.empty(): # pragma: no cover - try: - e = self.errors.get(False) - result.append(e) - except self.errors.Empty: - continue - self.errors.task_done() - return result - - def clear_errors(self): - """ - Clear any errors which may have been logged. - """ - # Just get the errors and throw them away - self.get_errors() - - def clear_cache(self): - self._cache.clear() - - def _get_scheme(self): - return self._scheme - - def _set_scheme(self, value): - self._scheme = value - - scheme = property(_get_scheme, _set_scheme) - - def _get_project(self, name): - """ - For a given project, get a dictionary mapping available versions to Distribution - instances. - - This should be implemented in subclasses. - - If called from a locate() request, self.matcher will be set to a - matcher for the requirement to satisfy, otherwise it will be None. - """ - raise NotImplementedError('Please implement in the subclass') - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - raise NotImplementedError('Please implement in the subclass') - - def get_project(self, name): - """ - For a given project, get a dictionary mapping available versions to Distribution - instances. - - This calls _get_project to do all the work, and just implements a caching layer on top. - """ - if self._cache is None: # pragma: no cover - result = self._get_project(name) - elif name in self._cache: - result = self._cache[name] - else: - self.clear_errors() - result = self._get_project(name) - self._cache[name] = result - return result - - def score_url(self, url): - """ - Give an url a score which can be used to choose preferred URLs - for a given project release. - """ - t = urlparse(url) - basename = posixpath.basename(t.path) - compatible = True - is_wheel = basename.endswith('.whl') - is_downloadable = basename.endswith(self.downloadable_extensions) - if is_wheel: - compatible = is_compatible(Wheel(basename), self.wheel_tags) - return (t.scheme == 'https', 'pypi.org' in t.netloc, - is_downloadable, is_wheel, compatible, basename) - - def prefer_url(self, url1, url2): - """ - Choose one of two URLs where both are candidates for distribution - archives for the same version of a distribution (for example, - .tar.gz vs. zip). - - The current implementation favours https:// URLs over http://, archives - from PyPI over those from other locations, wheel compatibility (if a - wheel) and then the archive name. 
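# score_url (defined above) reduces URL preference to a plain tuple so that
# prefer_url can rely on Python's lexicographic tuple comparison. A minimal
# standalone sketch of the same idea, ignoring the wheel-compatibility check
# (the mirror URL below is a made-up example):
from urllib.parse import urlparse
import posixpath

def score_url_sketch(url):
    t = urlparse(url)
    basename = posixpath.basename(t.path)
    return (t.scheme == 'https', 'pypi.org' in t.netloc,
            basename.endswith(('.tar.gz', '.zip', '.whl')), basename)

u1 = score_url_sketch('https://pypi.org/packages/foo-1.0.tar.gz')
u2 = score_url_sketch('http://mirror.example.com/foo-1.0.tar.gz')  # hypothetical mirror
print(u1 > u2)  # True: https and pypi.org outrank the http mirror, so u1 is preferred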
- """ - result = url2 - if url1: - s1 = self.score_url(url1) - s2 = self.score_url(url2) - if s1 > s2: - result = url1 - if result != url2: - logger.debug('Not replacing %r with %r', url1, url2) - else: - logger.debug('Replacing %r with %r', url1, url2) - return result - - def split_filename(self, filename, project_name): - """ - Attempt to split a filename in project name, version and Python version. - """ - return split_filename(filename, project_name) - - def convert_url_to_download_info(self, url, project_name): - """ - See if a URL is a candidate for a download URL for a project (the URL - has typically been scraped from an HTML page). - - If it is, a dictionary is returned with keys "name", "version", - "filename" and "url"; otherwise, None is returned. - """ - def same_project(name1, name2): - return normalize_name(name1) == normalize_name(name2) - - result = None - scheme, netloc, path, params, query, frag = urlparse(url) - if frag.lower().startswith('egg='): # pragma: no cover - logger.debug('%s: version hint in fragment: %r', - project_name, frag) - m = HASHER_HASH.match(frag) - if m: - algo, digest = m.groups() - else: - algo, digest = None, None - origpath = path - if path and path[-1] == '/': # pragma: no cover - path = path[:-1] - if path.endswith('.whl'): - try: - wheel = Wheel(path) - if not is_compatible(wheel, self.wheel_tags): - logger.debug('Wheel not compatible: %s', path) - else: - if project_name is None: - include = True - else: - include = same_project(wheel.name, project_name) - if include: - result = { - 'name': wheel.name, - 'version': wheel.version, - 'filename': wheel.filename, - 'url': urlunparse((scheme, netloc, origpath, - params, query, '')), - 'python-version': ', '.join( - ['.'.join(list(v[2:])) for v in wheel.pyver]), - } - except Exception as e: # pragma: no cover - logger.warning('invalid path for wheel: %s', path) - elif not path.endswith(self.downloadable_extensions): # pragma: no cover - logger.debug('Not downloadable: %s', path) - else: # downloadable extension - path = filename = posixpath.basename(path) - for ext in self.downloadable_extensions: - if path.endswith(ext): - path = path[:-len(ext)] - t = self.split_filename(path, project_name) - if not t: # pragma: no cover - logger.debug('No match for project/version: %s', path) - else: - name, version, pyver = t - if not project_name or same_project(project_name, name): - result = { - 'name': name, - 'version': version, - 'filename': filename, - 'url': urlunparse((scheme, netloc, origpath, - params, query, '')), - #'packagetype': 'sdist', - } - if pyver: # pragma: no cover - result['python-version'] = pyver - break - if result and algo: - result['%s_digest' % algo] = digest - return result - - def _get_digest(self, info): - """ - Get a digest from a dictionary by looking at a "digests" dictionary - or keys of the form 'algo_digest'. - - Returns a 2-tuple (algo, digest) if found, else None. Currently - looks only for SHA256, then MD5. 
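# A quick illustration of the lookup order implemented below, using plain
# dictionaries shaped like the "info" records built elsewhere in this module:
#
#   {'digests': {'md5': 'abc', 'sha256': 'def'}}  ->  ('sha256', 'def')   # sha256 wins over md5
#   {'md5_digest': 'abc'}                         ->  ('md5', 'abc')      # algo_digest key fallback
#   {}                                            ->  None                # nothing recognisable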
- """ - result = None - if 'digests' in info: - digests = info['digests'] - for algo in ('sha256', 'md5'): - if algo in digests: - result = (algo, digests[algo]) - break - if not result: - for algo in ('sha256', 'md5'): - key = '%s_digest' % algo - if key in info: - result = (algo, info[key]) - break - return result - - def _update_version_data(self, result, info): - """ - Update a result dictionary (the final result from _get_project) with a - dictionary for a specific version, which typically holds information - gleaned from a filename or URL for an archive for the distribution. - """ - name = info.pop('name') - version = info.pop('version') - if version in result: - dist = result[version] - md = dist.metadata - else: - dist = make_dist(name, version, scheme=self.scheme) - md = dist.metadata - dist.digest = digest = self._get_digest(info) - url = info['url'] - result['digests'][url] = digest - if md.source_url != info['url']: - md.source_url = self.prefer_url(md.source_url, url) - result['urls'].setdefault(version, set()).add(url) - dist.locator = self - result[version] = dist - - def locate(self, requirement, prereleases=False): - """ - Find the most recent distribution which matches the given - requirement. - - :param requirement: A requirement of the form 'foo (1.0)' or perhaps - 'foo (>= 1.0, < 2.0, != 1.3)' - :param prereleases: If ``True``, allow pre-release versions - to be located. Otherwise, pre-release versions - are not returned. - :return: A :class:`Distribution` instance, or ``None`` if no such - distribution could be located. - """ - result = None - r = parse_requirement(requirement) - if r is None: # pragma: no cover - raise DistlibException('Not a valid requirement: %r' % requirement) - scheme = get_scheme(self.scheme) - self.matcher = matcher = scheme.matcher(r.requirement) - logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) - versions = self.get_project(r.name) - if len(versions) > 2: # urls and digests keys are present - # sometimes, versions are invalid - slist = [] - vcls = matcher.version_class - for k in versions: - if k in ('urls', 'digests'): - continue - try: - if not matcher.match(k): - pass # logger.debug('%s did not match %r', matcher, k) - else: - if prereleases or not vcls(k).is_prerelease: - slist.append(k) - # else: - # logger.debug('skipping pre-release ' - # 'version %s of %s', k, matcher.name) - except Exception: # pragma: no cover - logger.warning('error matching %s with %r', matcher, k) - pass # slist.append(k) - if len(slist) > 1: - slist = sorted(slist, key=scheme.key) - if slist: - logger.debug('sorted list: %s', slist) - version = slist[-1] - result = versions[version] - if result: - if r.extras: - result.extras = r.extras - result.download_urls = versions.get('urls', {}).get(version, set()) - d = {} - sd = versions.get('digests', {}) - for url in result.download_urls: - if url in sd: # pragma: no cover - d[url] = sd[url] - result.digests = d - self.matcher = None - return result - - -class PyPIRPCLocator(Locator): - """ - This locator uses XML-RPC to locate distributions. It therefore - cannot be used with simple mirrors (that only mirror file content). - """ - def __init__(self, url, **kwargs): - """ - Initialise an instance. - - :param url: The URL to use for XML-RPC. - :param kwargs: Passed to the superclass constructor. 
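# The locate() method defined above is what most callers use, usually via the
# module-level default_locator/locate helpers defined further down in this file.
# A hedged usage sketch (it needs network access to the index being queried):
#
#   from distlib.locators import locate
#
#   dist = locate('requests (>= 2.0)')                 # most recent matching version
#   pre = locate('requests (>= 2.0)', prereleases=True)
#   if dist is not None:
#       print(dist.name_and_version, dist.source_url)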
- """ - super(PyPIRPCLocator, self).__init__(**kwargs) - self.base_url = url - self.client = ServerProxy(url, timeout=3.0) - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - return set(self.client.list_packages()) - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - versions = self.client.package_releases(name, True) - for v in versions: - urls = self.client.release_urls(name, v) - data = self.client.release_data(name, v) - metadata = Metadata(scheme=self.scheme) - metadata.name = data['name'] - metadata.version = data['version'] - metadata.license = data.get('license') - metadata.keywords = data.get('keywords', []) - metadata.summary = data.get('summary') - dist = Distribution(metadata) - if urls: - info = urls[0] - metadata.source_url = info['url'] - dist.digest = self._get_digest(info) - dist.locator = self - result[v] = dist - for info in urls: - url = info['url'] - digest = self._get_digest(info) - result['urls'].setdefault(v, set()).add(url) - result['digests'][url] = digest - return result - -class PyPIJSONLocator(Locator): - """ - This locator uses PyPI's JSON interface. It's very limited in functionality - and probably not worth using. - """ - def __init__(self, url, **kwargs): - super(PyPIJSONLocator, self).__init__(**kwargs) - self.base_url = ensure_slash(url) - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - raise NotImplementedError('Not available from this locator') - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - url = urljoin(self.base_url, '%s/json' % quote(name)) - try: - resp = self.opener.open(url) - data = resp.read().decode() # for now - d = json.loads(data) - md = Metadata(scheme=self.scheme) - data = d['info'] - md.name = data['name'] - md.version = data['version'] - md.license = data.get('license') - md.keywords = data.get('keywords', []) - md.summary = data.get('summary') - dist = Distribution(md) - dist.locator = self - urls = d['urls'] - result[md.version] = dist - for info in d['urls']: - url = info['url'] - dist.download_urls.add(url) - dist.digests[url] = self._get_digest(info) - result['urls'].setdefault(md.version, set()).add(url) - result['digests'][url] = self._get_digest(info) - # Now get other releases - for version, infos in d['releases'].items(): - if version == md.version: - continue # already done - omd = Metadata(scheme=self.scheme) - omd.name = md.name - omd.version = version - odist = Distribution(omd) - odist.locator = self - result[version] = odist - for info in infos: - url = info['url'] - odist.download_urls.add(url) - odist.digests[url] = self._get_digest(info) - result['urls'].setdefault(version, set()).add(url) - result['digests'][url] = self._get_digest(info) -# for info in urls: -# md.source_url = info['url'] -# dist.digest = self._get_digest(info) -# dist.locator = self -# for info in urls: -# url = info['url'] -# result['urls'].setdefault(md.version, set()).add(url) -# result['digests'][url] = self._get_digest(info) - except Exception as e: - self.errors.put(text_type(e)) - logger.exception('JSON fetch failed: %s', e) - return result - - -class Page(object): - """ - This class represents a scraped HTML page. - """ - # The following slightly hairy-looking regex just looks for the contents of - # an anchor link, which has an attribute "href" either immediately preceded - # or immediately followed by a "rel" attribute. 
The attribute values can be - # declared with double quotes, single quotes or no quotes - which leads to - # the length of the expression. - _href = re.compile(""" -(rel\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*))\\s+)? -href\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*)) -(\\s+rel\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*)))? -""", re.I | re.S | re.X) - _base = re.compile(r"""]+)""", re.I | re.S) - - def __init__(self, data, url): - """ - Initialise an instance with the Unicode page contents and the URL they - came from. - """ - self.data = data - self.base_url = self.url = url - m = self._base.search(self.data) - if m: - self.base_url = m.group(1) - - _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) - - @cached_property - def links(self): - """ - Return the URLs of all the links on a page together with information - about their "rel" attribute, for determining which ones to treat as - downloads and which ones to queue for further scraping. - """ - def clean(url): - "Tidy up an URL." - scheme, netloc, path, params, query, frag = urlparse(url) - return urlunparse((scheme, netloc, quote(path), - params, query, frag)) - - result = set() - for match in self._href.finditer(self.data): - d = match.groupdict('') - rel = (d['rel1'] or d['rel2'] or d['rel3'] or - d['rel4'] or d['rel5'] or d['rel6']) - url = d['url1'] or d['url2'] or d['url3'] - url = urljoin(self.base_url, url) - url = unescape(url) - url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) - result.add((url, rel)) - # We sort the result, hoping to bring the most recent versions - # to the front - result = sorted(result, key=lambda t: t[0], reverse=True) - return result - - -class SimpleScrapingLocator(Locator): - """ - A locator which scrapes HTML pages to locate downloads for a distribution. - This runs multiple threads to do the I/O; performance is at least as good - as pip's PackageFinder, which works in an analogous fashion. - """ - - # These are used to deal with various Content-Encoding schemes. - decoders = { - 'deflate': zlib.decompress, - 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(), - 'none': lambda b: b, - } - - def __init__(self, url, timeout=None, num_workers=10, **kwargs): - """ - Initialise an instance. - :param url: The root URL to use for scraping. - :param timeout: The timeout, in seconds, to be applied to requests. - This defaults to ``None`` (no timeout specified). - :param num_workers: The number of worker threads you want to do I/O, - This defaults to 10. - :param kwargs: Passed to the superclass. - """ - super(SimpleScrapingLocator, self).__init__(**kwargs) - self.base_url = ensure_slash(url) - self.timeout = timeout - self._page_cache = {} - self._seen = set() - self._to_fetch = queue.Queue() - self._bad_hosts = set() - self.skip_externals = False - self.num_workers = num_workers - self._lock = threading.RLock() - # See issue #45: we need to be resilient when the locator is used - # in a thread, e.g. with concurrent.futures. We can't use self._lock - # as it is for coordinating our internal threads - the ones created - # in _prepare_threads. - self._gplock = threading.RLock() - self.platform_check = False # See issue #112 - - def _prepare_threads(self): - """ - Threads are created only when get_project is called, and terminate - before it returns. They are there primarily to parallelise I/O (i.e. - fetching web pages). 
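# A condensed, self-contained sketch of the worker/sentinel pattern that
# _prepare_threads, _fetch and _wait_threads implement below. The handle step
# here is a placeholder for the real page-fetching and link-queueing logic:
import queue
import threading

def run_workers(urls, num_workers=4):
    to_fetch = queue.Queue()
    results = []
    lock = threading.Lock()

    def worker():
        while True:
            url = to_fetch.get()
            try:
                if url:  # a None sentinel means "shut down"
                    with lock:
                        results.append(url.upper())  # placeholder for real work
            finally:
                to_fetch.task_done()  # always mark the item done to avoid hangs
            if not url:
                break

    threads = [threading.Thread(target=worker, daemon=True) for _ in range(num_workers)]
    for t in threads:
        t.start()
    for u in urls:
        to_fetch.put(u)
    to_fetch.join()             # wait until every queued item has been processed
    for t in threads:
        to_fetch.put(None)      # one sentinel per worker thread
    for t in threads:
        t.join()
    return results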
- """ - self._threads = [] - for i in range(self.num_workers): - t = threading.Thread(target=self._fetch) - t.daemon = True - t.start() - self._threads.append(t) - - def _wait_threads(self): - """ - Tell all the threads to terminate (by sending a sentinel value) and - wait for them to do so. - """ - # Note that you need two loops, since you can't say which - # thread will get each sentinel - for t in self._threads: - self._to_fetch.put(None) # sentinel - for t in self._threads: - t.join() - self._threads = [] - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - with self._gplock: - self.result = result - self.project_name = name - url = urljoin(self.base_url, '%s/' % quote(name)) - self._seen.clear() - self._page_cache.clear() - self._prepare_threads() - try: - logger.debug('Queueing %s', url) - self._to_fetch.put(url) - self._to_fetch.join() - finally: - self._wait_threads() - del self.result - return result - - platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|' - r'win(32|_amd64)|macosx_?\d+)\b', re.I) - - def _is_platform_dependent(self, url): - """ - Does an URL refer to a platform-specific download? - """ - return self.platform_dependent.search(url) - - def _process_download(self, url): - """ - See if an URL is a suitable download for a project. - - If it is, register information in the result dictionary (for - _get_project) about the specific version it's for. - - Note that the return value isn't actually used other than as a boolean - value. - """ - if self.platform_check and self._is_platform_dependent(url): - info = None - else: - info = self.convert_url_to_download_info(url, self.project_name) - logger.debug('process_download: %s -> %s', url, info) - if info: - with self._lock: # needed because self.result is shared - self._update_version_data(self.result, info) - return info - - def _should_queue(self, link, referrer, rel): - """ - Determine whether a link URL from a referring page and with a - particular "rel" attribute should be queued for scraping. - """ - scheme, netloc, path, _, _, _ = urlparse(link) - if path.endswith(self.source_extensions + self.binary_extensions + - self.excluded_extensions): - result = False - elif self.skip_externals and not link.startswith(self.base_url): - result = False - elif not referrer.startswith(self.base_url): - result = False - elif rel not in ('homepage', 'download'): - result = False - elif scheme not in ('http', 'https', 'ftp'): - result = False - elif self._is_platform_dependent(link): - result = False - else: - host = netloc.split(':', 1)[0] - if host.lower() == 'localhost': - result = False - else: - result = True - logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, - referrer, result) - return result - - def _fetch(self): - """ - Get a URL to fetch from the work queue, get the HTML page, examine its - links for download candidates and candidates for further scraping. - - This is a handy method to run in a thread. - """ - while True: - url = self._to_fetch.get() - try: - if url: - page = self.get_page(url) - if page is None: # e.g. after an error - continue - for link, rel in page.links: - if link not in self._seen: - try: - self._seen.add(link) - if (not self._process_download(link) and - self._should_queue(link, url, rel)): - logger.debug('Queueing %s from %s', link, url) - self._to_fetch.put(link) - except MetadataInvalidError: # e.g. 
invalid versions - pass - except Exception as e: # pragma: no cover - self.errors.put(text_type(e)) - finally: - # always do this, to avoid hangs :-) - self._to_fetch.task_done() - if not url: - #logger.debug('Sentinel seen, quitting.') - break - - def get_page(self, url): - """ - Get the HTML for an URL, possibly from an in-memory cache. - - XXX TODO Note: this cache is never actually cleared. It's assumed that - the data won't get stale over the lifetime of a locator instance (not - necessarily true for the default_locator). - """ - # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api - scheme, netloc, path, _, _, _ = urlparse(url) - if scheme == 'file' and os.path.isdir(url2pathname(path)): - url = urljoin(ensure_slash(url), 'index.html') - - if url in self._page_cache: - result = self._page_cache[url] - logger.debug('Returning %s from cache: %s', url, result) - else: - host = netloc.split(':', 1)[0] - result = None - if host in self._bad_hosts: - logger.debug('Skipping %s due to bad host %s', url, host) - else: - req = Request(url, headers={'Accept-encoding': 'identity'}) - try: - logger.debug('Fetching %s', url) - resp = self.opener.open(req, timeout=self.timeout) - logger.debug('Fetched %s', url) - headers = resp.info() - content_type = headers.get('Content-Type', '') - if HTML_CONTENT_TYPE.match(content_type): - final_url = resp.geturl() - data = resp.read() - encoding = headers.get('Content-Encoding') - if encoding: - decoder = self.decoders[encoding] # fail if not found - data = decoder(data) - encoding = 'utf-8' - m = CHARSET.search(content_type) - if m: - encoding = m.group(1) - try: - data = data.decode(encoding) - except UnicodeError: # pragma: no cover - data = data.decode('latin-1') # fallback - result = Page(data, final_url) - self._page_cache[final_url] = result - except HTTPError as e: - if e.code != 404: - logger.exception('Fetch failed: %s: %s', url, e) - except URLError as e: # pragma: no cover - logger.exception('Fetch failed: %s: %s', url, e) - with self._lock: - self._bad_hosts.add(host) - except Exception as e: # pragma: no cover - logger.exception('Fetch failed: %s: %s', url, e) - finally: - self._page_cache[url] = result # even if None (failure) - return result - - _distname_re = re.compile(']*>([^<]+)<') - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - result = set() - page = self.get_page(self.base_url) - if not page: - raise DistlibException('Unable to get %s' % self.base_url) - for match in self._distname_re.finditer(page.data): - result.add(match.group(1)) - return result - -class DirectoryLocator(Locator): - """ - This class locates distributions in a directory tree. - """ - - def __init__(self, path, **kwargs): - """ - Initialise an instance. - :param path: The root of the directory tree to search. - :param kwargs: Passed to the superclass constructor, - except for: - * recursive - if True (the default), subdirectories are - recursed into. If False, only the top-level directory - is searched, - """ - self.recursive = kwargs.pop('recursive', True) - super(DirectoryLocator, self).__init__(**kwargs) - path = os.path.abspath(path) - if not os.path.isdir(path): # pragma: no cover - raise DistlibException('Not a directory: %r' % path) - self.base_dir = path - - def should_include(self, filename, parent): - """ - Should a filename be considered as a candidate for a distribution - archive? 
As well as the filename, the directory which contains it - is provided, though not used by the current implementation. - """ - return filename.endswith(self.downloadable_extensions) - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - for root, dirs, files in os.walk(self.base_dir): - for fn in files: - if self.should_include(fn, root): - fn = os.path.join(root, fn) - url = urlunparse(('file', '', - pathname2url(os.path.abspath(fn)), - '', '', '')) - info = self.convert_url_to_download_info(url, name) - if info: - self._update_version_data(result, info) - if not self.recursive: - break - return result - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - result = set() - for root, dirs, files in os.walk(self.base_dir): - for fn in files: - if self.should_include(fn, root): - fn = os.path.join(root, fn) - url = urlunparse(('file', '', - pathname2url(os.path.abspath(fn)), - '', '', '')) - info = self.convert_url_to_download_info(url, None) - if info: - result.add(info['name']) - if not self.recursive: - break - return result - -class JSONLocator(Locator): - """ - This locator uses special extended metadata (not available on PyPI) and is - the basis of performant dependency resolution in distlib. Other locators - require archive downloads before dependencies can be determined! As you - might imagine, that can be slow. - """ - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - raise NotImplementedError('Not available from this locator') - - def _get_project(self, name): - result = {'urls': {}, 'digests': {}} - data = get_project_data(name) - if data: - for info in data.get('files', []): - if info['ptype'] != 'sdist' or info['pyversion'] != 'source': - continue - # We don't store summary in project metadata as it makes - # the data bigger for no benefit during dependency - # resolution - dist = make_dist(data['name'], info['version'], - summary=data.get('summary', - 'Placeholder for summary'), - scheme=self.scheme) - md = dist.metadata - md.source_url = info['url'] - # TODO SHA256 digest - if 'digest' in info and info['digest']: - dist.digest = ('md5', info['digest']) - md.dependencies = info.get('requirements', {}) - dist.exports = info.get('exports', {}) - result[dist.version] = dist - result['urls'].setdefault(dist.version, set()).add(info['url']) - return result - -class DistPathLocator(Locator): - """ - This locator finds installed distributions in a path. It can be useful for - adding to an :class:`AggregatingLocator`. - """ - def __init__(self, distpath, **kwargs): - """ - Initialise an instance. - - :param distpath: A :class:`DistributionPath` instance to search. - """ - super(DistPathLocator, self).__init__(**kwargs) - assert isinstance(distpath, DistributionPath) - self.distpath = distpath - - def _get_project(self, name): - dist = self.distpath.get_distribution(name) - if dist is None: - result = {'urls': {}, 'digests': {}} - else: - result = { - dist.version: dist, - 'urls': {dist.version: set([dist.source_url])}, - 'digests': {dist.version: set([None])} - } - return result - - -class AggregatingLocator(Locator): - """ - This class allows you to chain and/or merge a list of locators. - """ - def __init__(self, *locators, **kwargs): - """ - Initialise an instance. - - :param locators: The list of locators to search. 
- :param kwargs: Passed to the superclass constructor, - except for: - * merge - if False (the default), the first successful - search from any of the locators is returned. If True, - the results from all locators are merged (this can be - slow). - """ - self.merge = kwargs.pop('merge', False) - self.locators = locators - super(AggregatingLocator, self).__init__(**kwargs) - - def clear_cache(self): - super(AggregatingLocator, self).clear_cache() - for locator in self.locators: - locator.clear_cache() - - def _set_scheme(self, value): - self._scheme = value - for locator in self.locators: - locator.scheme = value - - scheme = property(Locator.scheme.fget, _set_scheme) - - def _get_project(self, name): - result = {} - for locator in self.locators: - d = locator.get_project(name) - if d: - if self.merge: - files = result.get('urls', {}) - digests = result.get('digests', {}) - # next line could overwrite result['urls'], result['digests'] - result.update(d) - df = result.get('urls') - if files and df: - for k, v in files.items(): - if k in df: - df[k] |= v - else: - df[k] = v - dd = result.get('digests') - if digests and dd: - dd.update(digests) - else: - # See issue #18. If any dists are found and we're looking - # for specific constraints, we only return something if - # a match is found. For example, if a DirectoryLocator - # returns just foo (1.0) while we're looking for - # foo (>= 2.0), we'll pretend there was nothing there so - # that subsequent locators can be queried. Otherwise we - # would just return foo (1.0) which would then lead to a - # failure to find foo (>= 2.0), because other locators - # weren't searched. Note that this only matters when - # merge=False. - if self.matcher is None: - found = True - else: - found = False - for k in d: - if self.matcher.match(k): - found = True - break - if found: - result = d - break - return result - - def get_distribution_names(self): - """ - Return all the distribution names known to this locator. - """ - result = set() - for locator in self.locators: - try: - result |= locator.get_distribution_names() - except NotImplementedError: - pass - return result - - -# We use a legacy scheme simply because most of the dists on PyPI use legacy -# versions which don't conform to PEP 440. -default_locator = AggregatingLocator( - # JSONLocator(), # don't use as PEP 426 is withdrawn - SimpleScrapingLocator('https://pypi.org/simple/', - timeout=3.0), - scheme='legacy') - -locate = default_locator.locate - - -class DependencyFinder(object): - """ - Locate dependencies for distributions. - """ - - def __init__(self, locator=None): - """ - Initialise an instance, using the specified locator - to locate distributions. - """ - self.locator = locator or default_locator - self.scheme = get_scheme(self.locator.scheme) - - def add_distribution(self, dist): - """ - Add a distribution to the finder. This will update internal information - about who provides what. - :param dist: The distribution to add. - """ - logger.debug('adding distribution %s', dist) - name = dist.key - self.dists_by_name[name] = dist - self.dists[(name, dist.version)] = dist - for p in dist.provides: - name, version = parse_name_and_version(p) - logger.debug('Add to provided: %s, %s, %s', name, version, dist) - self.provided.setdefault(name, set()).add((version, dist)) - - def remove_distribution(self, dist): - """ - Remove a distribution from the finder. This will update internal - information about who provides what. - :param dist: The distribution to remove. 
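# AggregatingLocator (defined above) is the usual way to combine sources, as
# default_locator itself does. A hedged composition sketch -- the local path
# and the project name are placeholders, and the directory must already exist:
#
#   from distlib.locators import (AggregatingLocator, DirectoryLocator,
#                                 SimpleScrapingLocator)
#
#   locator = AggregatingLocator(
#       DirectoryLocator('/path/to/local/wheelhouse'),              # hypothetical path
#       SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0),
#       scheme='legacy',       # same scheme default_locator uses
#       merge=False,           # first locator with a hit wins
#   )
#   dist = locator.locate('foo (>= 1.0)')                           # hypothetical project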
- """ - logger.debug('removing distribution %s', dist) - name = dist.key - del self.dists_by_name[name] - del self.dists[(name, dist.version)] - for p in dist.provides: - name, version = parse_name_and_version(p) - logger.debug('Remove from provided: %s, %s, %s', name, version, dist) - s = self.provided[name] - s.remove((version, dist)) - if not s: - del self.provided[name] - - def get_matcher(self, reqt): - """ - Get a version matcher for a requirement. - :param reqt: The requirement - :type reqt: str - :return: A version matcher (an instance of - :class:`distlib.version.Matcher`). - """ - try: - matcher = self.scheme.matcher(reqt) - except UnsupportedVersionError: # pragma: no cover - # XXX compat-mode if cannot read the version - name = reqt.split()[0] - matcher = self.scheme.matcher(name) - return matcher - - def find_providers(self, reqt): - """ - Find the distributions which can fulfill a requirement. - - :param reqt: The requirement. - :type reqt: str - :return: A set of distribution which can fulfill the requirement. - """ - matcher = self.get_matcher(reqt) - name = matcher.key # case-insensitive - result = set() - provided = self.provided - if name in provided: - for version, provider in provided[name]: - try: - match = matcher.match(version) - except UnsupportedVersionError: - match = False - - if match: - result.add(provider) - break - return result - - def try_to_replace(self, provider, other, problems): - """ - Attempt to replace one provider with another. This is typically used - when resolving dependencies from multiple sources, e.g. A requires - (B >= 1.0) while C requires (B >= 1.1). - - For successful replacement, ``provider`` must meet all the requirements - which ``other`` fulfills. - - :param provider: The provider we are trying to replace with. - :param other: The provider we're trying to replace. - :param problems: If False is returned, this will contain what - problems prevented replacement. This is currently - a tuple of the literal string 'cantreplace', - ``provider``, ``other`` and the set of requirements - that ``provider`` couldn't fulfill. - :return: True if we can replace ``other`` with ``provider``, else - False. - """ - rlist = self.reqts[other] - unmatched = set() - for s in rlist: - matcher = self.get_matcher(s) - if not matcher.match(provider.version): - unmatched.add(s) - if unmatched: - # can't replace other with provider - problems.add(('cantreplace', provider, other, - frozenset(unmatched))) - result = False - else: - # can replace other with provider - self.remove_distribution(other) - del self.reqts[other] - for s in rlist: - self.reqts.setdefault(provider, set()).add(s) - self.add_distribution(provider) - result = True - return result - - def find(self, requirement, meta_extras=None, prereleases=False): - """ - Find a distribution and all distributions it depends on. - - :param requirement: The requirement specifying the distribution to - find, or a Distribution instance. - :param meta_extras: A list of meta extras such as :test:, :build: and - so on. - :param prereleases: If ``True``, allow pre-release versions to be - returned - otherwise, don't return prereleases - unless they're all that's available. - - Return a set of :class:`Distribution` instances and a set of - problems. 
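# get_matcher and find_providers above lean on distlib's version schemes. A
# small runnable sketch of that matching step using the stand-alone distlib
# package and the 'default' scheme:
from distlib.version import get_scheme

scheme = get_scheme('default')
matcher = scheme.matcher('foo (>= 1.0, < 2.0)')
print(matcher.key)            # 'foo' -- the case-normalised project name
print(matcher.match('1.4'))   # True  -> a 1.4 release could satisfy this requirement
print(matcher.match('2.1'))   # False -> outside the requested range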
- - The distributions returned should be such that they have the - :attr:`required` attribute set to ``True`` if they were - from the ``requirement`` passed to ``find()``, and they have the - :attr:`build_time_dependency` attribute set to ``True`` unless they - are post-installation dependencies of the ``requirement``. - - The problems should be a tuple consisting of the string - ``'unsatisfied'`` and the requirement which couldn't be satisfied - by any distribution known to the locator. - """ - - self.provided = {} - self.dists = {} - self.dists_by_name = {} - self.reqts = {} - - meta_extras = set(meta_extras or []) - if ':*:' in meta_extras: - meta_extras.remove(':*:') - # :meta: and :run: are implicitly included - meta_extras |= set([':test:', ':build:', ':dev:']) - - if isinstance(requirement, Distribution): - dist = odist = requirement - logger.debug('passed %s as requirement', odist) - else: - dist = odist = self.locator.locate(requirement, - prereleases=prereleases) - if dist is None: - raise DistlibException('Unable to locate %r' % requirement) - logger.debug('located %s', odist) - dist.requested = True - problems = set() - todo = set([dist]) - install_dists = set([odist]) - while todo: - dist = todo.pop() - name = dist.key # case-insensitive - if name not in self.dists_by_name: - self.add_distribution(dist) - else: - #import pdb; pdb.set_trace() - other = self.dists_by_name[name] - if other != dist: - self.try_to_replace(dist, other, problems) - - ireqts = dist.run_requires | dist.meta_requires - sreqts = dist.build_requires - ereqts = set() - if meta_extras and dist in install_dists: - for key in ('test', 'build', 'dev'): - e = ':%s:' % key - if e in meta_extras: - ereqts |= getattr(dist, '%s_requires' % key) - all_reqts = ireqts | sreqts | ereqts - for r in all_reqts: - providers = self.find_providers(r) - if not providers: - logger.debug('No providers found for %r', r) - provider = self.locator.locate(r, prereleases=prereleases) - # If no provider is found and we didn't consider - # prereleases, consider them now. 
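# (In other words: find() first honours the caller's prereleases flag, and only
#  widens the search to pre-release versions when the stable-only lookup came
#  back empty -- matching the "unless they're all that's available" wording in
#  the docstring above.)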
- if provider is None and not prereleases: - provider = self.locator.locate(r, prereleases=True) - if provider is None: - logger.debug('Cannot satisfy %r', r) - problems.add(('unsatisfied', r)) - else: - n, v = provider.key, provider.version - if (n, v) not in self.dists: - todo.add(provider) - providers.add(provider) - if r in ireqts and dist in install_dists: - install_dists.add(provider) - logger.debug('Adding %s to install_dists', - provider.name_and_version) - for p in providers: - name = p.key - if name not in self.dists_by_name: - self.reqts.setdefault(p, set()).add(r) - else: - other = self.dists_by_name[name] - if other != p: - # see if other can be replaced by p - self.try_to_replace(p, other, problems) - - dists = set(self.dists.values()) - for dist in dists: - dist.build_time_dependency = dist not in install_dists - if dist.build_time_dependency: - logger.debug('%s is a build-time dependency only.', - dist.name_and_version) - logger.debug('find done for %s', odist) - return dists, problems diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_windows_renderer.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_windows_renderer.py deleted file mode 100644 index 5ece05649e7268a75c82de6ced552619ffc093ab..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_windows_renderer.py +++ /dev/null @@ -1,56 +0,0 @@ -from typing import Iterable, Sequence, Tuple, cast - -from pip._vendor.rich._win32_console import LegacyWindowsTerm, WindowsCoordinates -from pip._vendor.rich.segment import ControlCode, ControlType, Segment - - -def legacy_windows_render(buffer: Iterable[Segment], term: LegacyWindowsTerm) -> None: - """Makes appropriate Windows Console API calls based on the segments in the buffer. - - Args: - buffer (Iterable[Segment]): Iterable of Segments to convert to Win32 API calls. - term (LegacyWindowsTerm): Used to call the Windows Console API. 
- """ - for text, style, control in buffer: - if not control: - if style: - term.write_styled(text, style) - else: - term.write_text(text) - else: - control_codes: Sequence[ControlCode] = control - for control_code in control_codes: - control_type = control_code[0] - if control_type == ControlType.CURSOR_MOVE_TO: - _, x, y = cast(Tuple[ControlType, int, int], control_code) - term.move_cursor_to(WindowsCoordinates(row=y - 1, col=x - 1)) - elif control_type == ControlType.CARRIAGE_RETURN: - term.write_text("\r") - elif control_type == ControlType.HOME: - term.move_cursor_to(WindowsCoordinates(0, 0)) - elif control_type == ControlType.CURSOR_UP: - term.move_cursor_up() - elif control_type == ControlType.CURSOR_DOWN: - term.move_cursor_down() - elif control_type == ControlType.CURSOR_FORWARD: - term.move_cursor_forward() - elif control_type == ControlType.CURSOR_BACKWARD: - term.move_cursor_backward() - elif control_type == ControlType.CURSOR_MOVE_TO_COLUMN: - _, column = cast(Tuple[ControlType, int], control_code) - term.move_cursor_to_column(column - 1) - elif control_type == ControlType.HIDE_CURSOR: - term.hide_cursor() - elif control_type == ControlType.SHOW_CURSOR: - term.show_cursor() - elif control_type == ControlType.ERASE_IN_LINE: - _, mode = cast(Tuple[ControlType, int], control_code) - if mode == 0: - term.erase_end_of_line() - elif mode == 1: - term.erase_start_of_line() - elif mode == 2: - term.erase_line() - elif control_type == ControlType.SET_WINDOW_TITLE: - _, title = cast(Tuple[ControlType, str], control_code) - term.set_title(title) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/py38compat.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/py38compat.py deleted file mode 100644 index 59224e71e50c49e5f9f6f925837597c035a8ab7f..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_distutils/py38compat.py +++ /dev/null @@ -1,8 +0,0 @@ -def aix_platform(osname, version, release): - try: - import _aix_support - - return _aix_support.aix_platform() - except ImportError: - pass - return "{}-{}.{}".format(osname, version, release) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_path.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_path.py deleted file mode 100644 index 3767523b784bb93b5b79890eff359628fcfcaa34..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/_path.py +++ /dev/null @@ -1,29 +0,0 @@ -import os -from typing import Union - -_Path = Union[str, os.PathLike] - - -def ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - os.makedirs(dirname, exist_ok=True) - - -def same_path(p1: _Path, p2: _Path) -> bool: - """Differs from os.path.samefile because it does not require paths to exist. - Purely string based (no comparison between i-nodes). 
- >>> same_path("a/b", "./a/b") - True - >>> same_path("a/b", "a/./b") - True - >>> same_path("a/b", "././a/b") - True - >>> same_path("a/b", "./a/b/c/..") - True - >>> same_path("a/b", "../a/b/c") - False - >>> same_path("a", "a/b") - False - """ - return os.path.normpath(p1) == os.path.normpath(p2) diff --git a/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/__init__.py b/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/__init__.py deleted file mode 100644 index 2709f5e586150289085a4e2cbd458bc443fab7f3..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/Roma/roma/utils/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -from .utils import ( - pose_auc, - get_pose, - compute_relative_pose, - compute_pose_error, - estimate_pose, - estimate_pose_uncalibrated, - rotate_intrinsic, - get_tuple_transform_ops, - get_depth_tuple_transform_ops, - warp_kpts, - numpy_to_pil, - tensor_to_pil, - recover_pose, - signed_left_to_right_epipolar_distance, -) diff --git a/spaces/Realcat/image-matching-webui/third_party/SuperGluePretrainedNetwork/models/superglue.py b/spaces/Realcat/image-matching-webui/third_party/SuperGluePretrainedNetwork/models/superglue.py deleted file mode 100644 index 70156e07b83614b1dfb36207ea96b4b79a6ddbb9..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/SuperGluePretrainedNetwork/models/superglue.py +++ /dev/null @@ -1,304 +0,0 @@ -# %BANNER_BEGIN% -# --------------------------------------------------------------------- -# %COPYRIGHT_BEGIN% -# -# Magic Leap, Inc. ("COMPANY") CONFIDENTIAL -# -# Unpublished Copyright (c) 2020 -# Magic Leap, Inc., All Rights Reserved. -# -# NOTICE: All information contained herein is, and remains the property -# of COMPANY. The intellectual and technical concepts contained herein -# are proprietary to COMPANY and may be covered by U.S. and Foreign -# Patents, patents in process, and are protected by trade secret or -# copyright law. Dissemination of this information or reproduction of -# this material is strictly forbidden unless prior written permission is -# obtained from COMPANY. Access to the source code contained herein is -# hereby forbidden to anyone except current COMPANY employees, managers -# or contractors who have executed Confidentiality and Non-disclosure -# agreements explicitly covering such access. -# -# The copyright notice above does not evidence any actual or intended -# publication or disclosure of this source code, which includes -# information that is confidential and/or proprietary, and is a trade -# secret, of COMPANY. ANY REPRODUCTION, MODIFICATION, DISTRIBUTION, -# PUBLIC PERFORMANCE, OR PUBLIC DISPLAY OF OR THROUGH USE OF THIS -# SOURCE CODE WITHOUT THE EXPRESS WRITTEN CONSENT OF COMPANY IS -# STRICTLY PROHIBITED, AND IN VIOLATION OF APPLICABLE LAWS AND -# INTERNATIONAL TREATIES. THE RECEIPT OR POSSESSION OF THIS SOURCE -# CODE AND/OR RELATED INFORMATION DOES NOT CONVEY OR IMPLY ANY RIGHTS -# TO REPRODUCE, DISCLOSE OR DISTRIBUTE ITS CONTENTS, OR TO MANUFACTURE, -# USE, OR SELL ANYTHING THAT IT MAY DESCRIBE, IN WHOLE OR IN PART. 
-# -# %COPYRIGHT_END% -# ---------------------------------------------------------------------- -# %AUTHORS_BEGIN% -# -# Originating Authors: Paul-Edouard Sarlin -# -# %AUTHORS_END% -# --------------------------------------------------------------------*/ -# %BANNER_END% - -from copy import deepcopy -from pathlib import Path -from typing import List, Tuple - -import torch -from torch import nn - - -def MLP(channels: List[int], do_bn: bool = True) -> nn.Module: - """Multi-layer perceptron""" - n = len(channels) - layers = [] - for i in range(1, n): - layers.append(nn.Conv1d(channels[i - 1], channels[i], kernel_size=1, bias=True)) - if i < (n - 1): - if do_bn: - layers.append(nn.BatchNorm1d(channels[i])) - layers.append(nn.ReLU()) - return nn.Sequential(*layers) - - -def normalize_keypoints(kpts, image_shape): - """Normalize keypoints locations based on image image_shape""" - _, _, height, width = image_shape - one = kpts.new_tensor(1) - size = torch.stack([one * width, one * height])[None] - center = size / 2 - scaling = size.max(1, keepdim=True).values * 0.7 - return (kpts - center[:, None, :]) / scaling[:, None, :] - - -class KeypointEncoder(nn.Module): - """Joint encoding of visual appearance and location using MLPs""" - - def __init__(self, feature_dim: int, layers: List[int]) -> None: - super().__init__() - self.encoder = MLP([3] + layers + [feature_dim]) - nn.init.constant_(self.encoder[-1].bias, 0.0) - - def forward(self, kpts, scores): - inputs = [kpts.transpose(1, 2), scores.unsqueeze(1)] - return self.encoder(torch.cat(inputs, dim=1)) - - -def attention( - query: torch.Tensor, key: torch.Tensor, value: torch.Tensor -) -> Tuple[torch.Tensor, torch.Tensor]: - dim = query.shape[1] - scores = torch.einsum("bdhn,bdhm->bhnm", query, key) / dim**0.5 - prob = torch.nn.functional.softmax(scores, dim=-1) - return torch.einsum("bhnm,bdhm->bdhn", prob, value), prob - - -class MultiHeadedAttention(nn.Module): - """Multi-head attention to increase model expressivitiy""" - - def __init__(self, num_heads: int, d_model: int): - super().__init__() - assert d_model % num_heads == 0 - self.dim = d_model // num_heads - self.num_heads = num_heads - self.merge = nn.Conv1d(d_model, d_model, kernel_size=1) - self.proj = nn.ModuleList([deepcopy(self.merge) for _ in range(3)]) - - def forward( - self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor - ) -> torch.Tensor: - batch_dim = query.size(0) - query, key, value = [ - l(x).view(batch_dim, self.dim, self.num_heads, -1) - for l, x in zip(self.proj, (query, key, value)) - ] - x, _ = attention(query, key, value) - return self.merge(x.contiguous().view(batch_dim, self.dim * self.num_heads, -1)) - - -class AttentionalPropagation(nn.Module): - def __init__(self, feature_dim: int, num_heads: int): - super().__init__() - self.attn = MultiHeadedAttention(num_heads, feature_dim) - self.mlp = MLP([feature_dim * 2, feature_dim * 2, feature_dim]) - nn.init.constant_(self.mlp[-1].bias, 0.0) - - def forward(self, x: torch.Tensor, source: torch.Tensor) -> torch.Tensor: - message = self.attn(x, source, source) - return self.mlp(torch.cat([x, message], dim=1)) - - -class AttentionalGNN(nn.Module): - def __init__(self, feature_dim: int, layer_names: List[str]) -> None: - super().__init__() - self.layers = nn.ModuleList( - [AttentionalPropagation(feature_dim, 4) for _ in range(len(layer_names))] - ) - self.names = layer_names - - def forward( - self, desc0: torch.Tensor, desc1: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - for layer, name in 
zip(self.layers, self.names): - if name == "cross": - src0, src1 = desc1, desc0 - else: # if name == 'self': - src0, src1 = desc0, desc1 - delta0, delta1 = layer(desc0, src0), layer(desc1, src1) - desc0, desc1 = (desc0 + delta0), (desc1 + delta1) - return desc0, desc1 - - -def log_sinkhorn_iterations( - Z: torch.Tensor, log_mu: torch.Tensor, log_nu: torch.Tensor, iters: int -) -> torch.Tensor: - """Perform Sinkhorn Normalization in Log-space for stability""" - u, v = torch.zeros_like(log_mu), torch.zeros_like(log_nu) - for _ in range(iters): - u = log_mu - torch.logsumexp(Z + v.unsqueeze(1), dim=2) - v = log_nu - torch.logsumexp(Z + u.unsqueeze(2), dim=1) - return Z + u.unsqueeze(2) + v.unsqueeze(1) - - -def log_optimal_transport( - scores: torch.Tensor, alpha: torch.Tensor, iters: int -) -> torch.Tensor: - """Perform Differentiable Optimal Transport in Log-space for stability""" - b, m, n = scores.shape - one = scores.new_tensor(1) - ms, ns = (m * one).to(scores), (n * one).to(scores) - - bins0 = alpha.expand(b, m, 1) - bins1 = alpha.expand(b, 1, n) - alpha = alpha.expand(b, 1, 1) - - couplings = torch.cat( - [torch.cat([scores, bins0], -1), torch.cat([bins1, alpha], -1)], 1 - ) - - norm = -(ms + ns).log() - log_mu = torch.cat([norm.expand(m), ns.log()[None] + norm]) - log_nu = torch.cat([norm.expand(n), ms.log()[None] + norm]) - log_mu, log_nu = log_mu[None].expand(b, -1), log_nu[None].expand(b, -1) - - Z = log_sinkhorn_iterations(couplings, log_mu, log_nu, iters) - Z = Z - norm # multiply probabilities by M+N - return Z - - -def arange_like(x, dim: int): - return x.new_ones(x.shape[dim]).cumsum(0) - 1 # traceable in 1.1 - - -class SuperGlue(nn.Module): - """SuperGlue feature matching middle-end - - Given two sets of keypoints and locations, we determine the - correspondences by: - 1. Keypoint Encoding (normalization + visual feature and location fusion) - 2. Graph Neural Network with multiple self and cross-attention layers - 3. Final projection layer - 4. Optimal Transport Layer (a differentiable Hungarian matching algorithm) - 5. Thresholding matrix based on mutual exclusivity and a match_threshold - - The correspondence ids use -1 to indicate non-matching points. - - Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew - Rabinovich. SuperGlue: Learning Feature Matching with Graph Neural - Networks. In CVPR, 2020. 
https://arxiv.org/abs/1911.11763 - - """ - - default_config = { - "descriptor_dim": 256, - "weights": "indoor", - "keypoint_encoder": [32, 64, 128, 256], - "GNN_layers": ["self", "cross"] * 9, - "sinkhorn_iterations": 100, - "match_threshold": 0.2, - } - - def __init__(self, config): - super().__init__() - self.config = {**self.default_config, **config} - - self.kenc = KeypointEncoder( - self.config["descriptor_dim"], self.config["keypoint_encoder"] - ) - - self.gnn = AttentionalGNN( - feature_dim=self.config["descriptor_dim"], - layer_names=self.config["GNN_layers"], - ) - - self.final_proj = nn.Conv1d( - self.config["descriptor_dim"], - self.config["descriptor_dim"], - kernel_size=1, - bias=True, - ) - - bin_score = torch.nn.Parameter(torch.tensor(1.0)) - self.register_parameter("bin_score", bin_score) - - assert self.config["weights"] in ["indoor", "outdoor"] - path = Path(__file__).parent - path = path / "weights/superglue_{}.pth".format(self.config["weights"]) - self.load_state_dict(torch.load(str(path))) - print('Loaded SuperGlue model ("{}" weights)'.format(self.config["weights"])) - - def forward(self, data): - """Run SuperGlue on a pair of keypoints and descriptors""" - desc0, desc1 = data["descriptors0"], data["descriptors1"] - kpts0, kpts1 = data["keypoints0"], data["keypoints1"] - - if kpts0.shape[1] == 0 or kpts1.shape[1] == 0: # no keypoints - shape0, shape1 = kpts0.shape[:-1], kpts1.shape[:-1] - return { - "matches0": kpts0.new_full(shape0, -1, dtype=torch.int), - "matches1": kpts1.new_full(shape1, -1, dtype=torch.int), - "matching_scores0": kpts0.new_zeros(shape0), - "matching_scores1": kpts1.new_zeros(shape1), - } - - # Keypoint normalization. - kpts0 = normalize_keypoints(kpts0, data["image0"].shape) - kpts1 = normalize_keypoints(kpts1, data["image1"].shape) - - # Keypoint MLP encoder. - desc0 = desc0 + self.kenc(kpts0, data["scores0"]) - desc1 = desc1 + self.kenc(kpts1, data["scores1"]) - - # Multi-layer Transformer network. - desc0, desc1 = self.gnn(desc0, desc1) - - # Final MLP projection. - mdesc0, mdesc1 = self.final_proj(desc0), self.final_proj(desc1) - - # Compute matching descriptor distance. - scores = torch.einsum("bdn,bdm->bnm", mdesc0, mdesc1) - scores = scores / self.config["descriptor_dim"] ** 0.5 - - # Run the optimal transport. - scores = log_optimal_transport( - scores, self.bin_score, iters=self.config["sinkhorn_iterations"] - ) - - # Get the matches with score above "match_threshold". 
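        # (The [:, :-1, :-1] slice below drops the extra "dustbin" row and
        # column added during optimal transport. A pair (i, j) survives only
        # if each keypoint is the other's best-scoring partner, a mutual
        # nearest-neighbour check, and its match probability exp(score)
        # exceeds match_threshold; every other keypoint gets index -1.)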
- max0, max1 = scores[:, :-1, :-1].max(2), scores[:, :-1, :-1].max(1) - indices0, indices1 = max0.indices, max1.indices - mutual0 = arange_like(indices0, 1)[None] == indices1.gather(1, indices0) - mutual1 = arange_like(indices1, 1)[None] == indices0.gather(1, indices1) - zero = scores.new_tensor(0) - mscores0 = torch.where(mutual0, max0.values.exp(), zero) - mscores1 = torch.where(mutual1, mscores0.gather(1, indices1), zero) - valid0 = mutual0 & (mscores0 > self.config["match_threshold"]) - valid1 = mutual1 & valid0.gather(1, indices1) - indices0 = torch.where(valid0, indices0, indices0.new_tensor(-1)) - indices1 = torch.where(valid1, indices1, indices1.new_tensor(-1)) - return { - "matches0": indices0, # use -1 for invalid match - "matches1": indices1, # use -1 for invalid match - "matching_scores0": mscores0, - "matching_scores1": mscores1, - } diff --git a/spaces/Retinalogic/pastel-mix/app.py b/spaces/Retinalogic/pastel-mix/app.py deleted file mode 100644 index f90587fb1b4538cf5a92df4655fed8e4e2cf9579..0000000000000000000000000000000000000000 --- a/spaces/Retinalogic/pastel-mix/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/andite/pastel-mix").launch() \ No newline at end of file diff --git a/spaces/RichardMB1217/blip2/README.md b/spaces/RichardMB1217/blip2/README.md deleted file mode 100644 index 31166de4dede8fccf1cbb5935adbb4c228443d90..0000000000000000000000000000000000000000 --- a/spaces/RichardMB1217/blip2/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: BLIP2 -emoji: 🌖 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: bsd-3-clause -models: - - Salesforce/blip2-opt-6.7b - - Salesforce/blip2-flan-t5-xxl ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/wider_face.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/wider_face.py deleted file mode 100644 index 3a13907db87a9986a7d701837259a0b712fc9dca..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/datasets/wider_face.py +++ /dev/null @@ -1,51 +0,0 @@ -import os.path as osp -import xml.etree.ElementTree as ET - -import mmcv - -from .builder import DATASETS -from .xml_style import XMLDataset - - -@DATASETS.register_module() -class WIDERFaceDataset(XMLDataset): - """Reader for the WIDER Face dataset in PASCAL VOC format. - - Conversion scripts can be found in - https://github.com/sovrasov/wider-face-pascal-voc-annotations - """ - CLASSES = ('face', ) - - def __init__(self, **kwargs): - super(WIDERFaceDataset, self).__init__(**kwargs) - - def load_annotations(self, ann_file): - """Load annotation from WIDERFace XML style annotation file. - - Args: - ann_file (str): Path of XML file. - - Returns: - list[dict]: Annotation info from XML file. 
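        (Each per-image XML is expected to provide at least a <size> element
        with <width> and <height> children plus a <folder> element, as parsed
        below.)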
- """ - - data_infos = [] - img_ids = mmcv.list_from_file(ann_file) - for img_id in img_ids: - filename = f'{img_id}.jpg' - xml_path = osp.join(self.img_prefix, 'Annotations', - f'{img_id}.xml') - tree = ET.parse(xml_path) - root = tree.getroot() - size = root.find('size') - width = int(size.find('width').text) - height = int(size.find('height').text) - folder = root.find('folder').text - data_infos.append( - dict( - id=img_id, - filename=osp.join(folder, filename), - width=width, - height=height)) - - return data_infos diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/accuracy.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/accuracy.py deleted file mode 100644 index 789a2240a491289c5801b6690116e8ca657d004f..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/losses/accuracy.py +++ /dev/null @@ -1,78 +0,0 @@ -import mmcv -import torch.nn as nn - - -@mmcv.jit(coderize=True) -def accuracy(pred, target, topk=1, thresh=None): - """Calculate accuracy according to the prediction and target. - - Args: - pred (torch.Tensor): The model prediction, shape (N, num_class) - target (torch.Tensor): The target of each prediction, shape (N, ) - topk (int | tuple[int], optional): If the predictions in ``topk`` - matches the target, the predictions will be regarded as - correct ones. Defaults to 1. - thresh (float, optional): If not None, predictions with scores under - this threshold are considered incorrect. Default to None. - - Returns: - float | tuple[float]: If the input ``topk`` is a single integer, - the function will return a single float as accuracy. If - ``topk`` is a tuple containing multiple integers, the - function will return a tuple containing accuracies of - each ``topk`` number. - """ - assert isinstance(topk, (int, tuple)) - if isinstance(topk, int): - topk = (topk, ) - return_single = True - else: - return_single = False - - maxk = max(topk) - if pred.size(0) == 0: - accu = [pred.new_tensor(0.) for i in range(len(topk))] - return accu[0] if return_single else accu - assert pred.ndim == 2 and target.ndim == 1 - assert pred.size(0) == target.size(0) - assert maxk <= pred.size(1), \ - f'maxk {maxk} exceeds pred dimension {pred.size(1)}' - pred_value, pred_label = pred.topk(maxk, dim=1) - pred_label = pred_label.t() # transpose to shape (maxk, N) - correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) - if thresh is not None: - # Only prediction values larger than thresh are counted as correct - correct = correct & (pred_value > thresh).t() - res = [] - for k in topk: - correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True) - res.append(correct_k.mul_(100.0 / pred.size(0))) - return res[0] if return_single else res - - -class Accuracy(nn.Module): - - def __init__(self, topk=(1, ), thresh=None): - """Module to calculate the accuracy. - - Args: - topk (tuple, optional): The criterion used to calculate the - accuracy. Defaults to (1,). - thresh (float, optional): If not None, predictions with scores - under this threshold are considered incorrect. Default to None. - """ - super().__init__() - self.topk = topk - self.thresh = thresh - - def forward(self, pred, target): - """Forward function to calculate accuracy. - - Args: - pred (torch.Tensor): Prediction of models. - target (torch.Tensor): Target for each prediction. - - Returns: - tuple[float]: The accuracies under different topk criterions. 
- """ - return accuracy(pred, target, self.topk, self.thresh) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/necks/fpn.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/necks/fpn.py deleted file mode 100644 index 5e5dfe685964f06e7a66b63a13e66162e63fcafd..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/necks/fpn.py +++ /dev/null @@ -1,221 +0,0 @@ -import warnings - -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, xavier_init -from mmcv.runner import auto_fp16 - -from ..builder import NECKS - - -@NECKS.register_module() -class FPN(nn.Module): - r"""Feature Pyramid Network. - - This is an implementation of paper `Feature Pyramid Networks for Object - Detection `_. - - Args: - in_channels (List[int]): Number of input channels per scale. - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. - add_extra_convs (bool | str): If bool, it decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. - If str, it specifies the source feature map of the extra convs. - Only the following options are allowed - - - 'on_input': Last feat map of neck inputs (i.e. backbone feature). - - 'on_lateral': Last feature map after lateral convs. - - 'on_output': The last output feature map after fpn convs. - extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs - on the original feature from the backbone. If True, - it is equivalent to `add_extra_convs='on_input'`. If False, it is - equivalent to set `add_extra_convs='on_output'`. Default to True. - relu_before_extra_convs (bool): Whether to apply relu before the extra - conv. Default: False. - no_norm_on_lateral (bool): Whether to apply norm on lateral. - Default: False. - conv_cfg (dict): Config dict for convolution layer. Default: None. - norm_cfg (dict): Config dict for normalization layer. Default: None. - act_cfg (str): Config dict for activation layer in ConvModule. - Default: None. - upsample_cfg (dict): Config dict for interpolate layer. - Default: `dict(mode='nearest')` - - Example: - >>> import torch - >>> in_channels = [2, 3, 5, 7] - >>> scales = [340, 170, 84, 43] - >>> inputs = [torch.rand(1, c, s, s) - ... for c, s in zip(in_channels, scales)] - >>> self = FPN(in_channels, 11, len(in_channels)).eval() - >>> outputs = self.forward(inputs) - >>> for i in range(len(outputs)): - ... 
print(f'outputs[{i}].shape = {outputs[i].shape}') - outputs[0].shape = torch.Size([1, 11, 340, 340]) - outputs[1].shape = torch.Size([1, 11, 170, 170]) - outputs[2].shape = torch.Size([1, 11, 84, 84]) - outputs[3].shape = torch.Size([1, 11, 43, 43]) - """ - - def __init__(self, - in_channels, - out_channels, - num_outs, - start_level=0, - end_level=-1, - add_extra_convs=False, - extra_convs_on_inputs=True, - relu_before_extra_convs=False, - no_norm_on_lateral=False, - conv_cfg=None, - norm_cfg=None, - act_cfg=None, - upsample_cfg=dict(mode='nearest')): - super(FPN, self).__init__() - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - self.relu_before_extra_convs = relu_before_extra_convs - self.no_norm_on_lateral = no_norm_on_lateral - self.fp16_enabled = False - self.upsample_cfg = upsample_cfg.copy() - - if end_level == -1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level < inputs, no extra level is allowed - self.backbone_end_level = end_level - assert end_level <= len(in_channels) - assert num_outs == end_level - start_level - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - assert isinstance(add_extra_convs, (str, bool)) - if isinstance(add_extra_convs, str): - # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output' - assert add_extra_convs in ('on_input', 'on_lateral', 'on_output') - elif add_extra_convs: # True - if extra_convs_on_inputs: - # TODO: deprecate `extra_convs_on_inputs` - warnings.simplefilter('once') - warnings.warn( - '"extra_convs_on_inputs" will be deprecated in v2.9.0,' - 'Please use "add_extra_convs"', DeprecationWarning) - self.add_extra_convs = 'on_input' - else: - self.add_extra_convs = 'on_output' - - self.lateral_convs = nn.ModuleList() - self.fpn_convs = nn.ModuleList() - - for i in range(self.start_level, self.backbone_end_level): - l_conv = ConvModule( - in_channels[i], - out_channels, - 1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg if not self.no_norm_on_lateral else None, - act_cfg=act_cfg, - inplace=False) - fpn_conv = ConvModule( - out_channels, - out_channels, - 3, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - - self.lateral_convs.append(l_conv) - self.fpn_convs.append(fpn_conv) - - # add extra conv layers (e.g., RetinaNet) - extra_levels = num_outs - self.backbone_end_level + self.start_level - if self.add_extra_convs and extra_levels >= 1: - for i in range(extra_levels): - if i == 0 and self.add_extra_convs == 'on_input': - in_channels = self.in_channels[self.backbone_end_level - 1] - else: - in_channels = out_channels - extra_fpn_conv = ConvModule( - in_channels, - out_channels, - 3, - stride=2, - padding=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg, - inplace=False) - self.fpn_convs.append(extra_fpn_conv) - - # default init_weights for conv(msra) and norm in ConvModule - def init_weights(self): - """Initialize the weights of FPN module.""" - for m in self.modules(): - if isinstance(m, nn.Conv2d): - xavier_init(m, distribution='uniform') - - @auto_fp16() - def forward(self, inputs): - """Forward function.""" - assert len(inputs) == len(self.in_channels) - - # build laterals - laterals = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - - # build top-down path - used_backbone_levels = 
len(laterals) - for i in range(used_backbone_levels - 1, 0, -1): - # In some cases, fixing `scale factor` (e.g. 2) is preferred, but - # it cannot co-exist with `size` in `F.interpolate`. - if 'scale_factor' in self.upsample_cfg: - laterals[i - 1] += F.interpolate(laterals[i], - **self.upsample_cfg) - else: - prev_shape = laterals[i - 1].shape[2:] - laterals[i - 1] += F.interpolate( - laterals[i], size=prev_shape, **self.upsample_cfg) - - # build outputs - # part 1: from original levels - outs = [ - self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels) - ] - # part 2: add extra levels - if self.num_outs > len(outs): - # use max pool to get more levels on top of outputs - # (e.g., Faster R-CNN, Mask R-CNN) - if not self.add_extra_convs: - for i in range(self.num_outs - used_backbone_levels): - outs.append(F.max_pool2d(outs[-1], 1, stride=2)) - # add conv layers on top of original feature maps (RetinaNet) - else: - if self.add_extra_convs == 'on_input': - extra_source = inputs[self.backbone_end_level - 1] - elif self.add_extra_convs == 'on_lateral': - extra_source = laterals[-1] - elif self.add_extra_convs == 'on_output': - extra_source = outs[-1] - else: - raise NotImplementedError - outs.append(self.fpn_convs[used_backbone_levels](extra_source)) - for i in range(used_backbone_levels + 1, self.num_outs): - if self.relu_before_extra_convs: - outs.append(self.fpn_convs[i](F.relu(outs[-1]))) - else: - outs.append(self.fpn_convs[i](outs[-1])) - return tuple(outs) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/exp/upernet_global_small/test.sh b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/exp/upernet_global_small/test.sh deleted file mode 100644 index d9a85e7a0d3b7c96b060f473d41254b37a382fcb..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/exp/upernet_global_small/test.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -work_path=$(dirname $0) -PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \ -python -m torch.distributed.launch --nproc_per_node=8 \ - tools/test.py ${work_path}/test_config_h32.py \ - ${work_path}/ckpt/latest.pth \ - --launcher pytorch \ - --eval mIoU \ - 2>&1 | tee -a ${work_path}/log.txt diff --git a/spaces/SAAZIZI/SummarizeAV/keyword_retriever/__init__.py b/spaces/SAAZIZI/SummarizeAV/keyword_retriever/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/SeViLA/SeViLA/lavis/models/__init__.py b/spaces/SeViLA/SeViLA/lavis/models/__init__.py deleted file mode 100644 index 6b56f55c6ab1b9b2a14e915850e9ce6c9a1c93be..0000000000000000000000000000000000000000 --- a/spaces/SeViLA/SeViLA/lavis/models/__init__.py +++ /dev/null @@ -1,265 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -import logging -from omegaconf import OmegaConf -from lavis.common.registry import registry - -from lavis.models.base_model import BaseModel - -from lavis.models.albef_models.albef_classification import AlbefClassification -from lavis.models.albef_models.albef_feature_extractor import AlbefFeatureExtractor -from lavis.models.albef_models.albef_nlvr import AlbefNLVR -from lavis.models.albef_models.albef_pretrain import AlbefPretrain -from lavis.models.albef_models.albef_retrieval import AlbefRetrieval -from lavis.models.albef_models.albef_vqa import AlbefVQA -from lavis.models.alpro_models.alpro_qa import AlproQA -from lavis.models.alpro_models.alpro_retrieval import AlproRetrieval - -from lavis.models.blip_models.blip import BlipBase -from lavis.models.blip_models.blip_caption import BlipCaption -from lavis.models.blip_models.blip_classification import BlipClassification -from lavis.models.blip_models.blip_feature_extractor import BlipFeatureExtractor -from lavis.models.blip_models.blip_image_text_matching import BlipITM -from lavis.models.blip_models.blip_nlvr import BlipNLVR -from lavis.models.blip_models.blip_pretrain import BlipPretrain -from lavis.models.blip_models.blip_retrieval import BlipRetrieval -from lavis.models.blip_models.blip_vqa import BlipVQA - -from lavis.models.blip2_models.blip2 import Blip2Base -from lavis.models.blip2_models.blip2_opt import Blip2OPT -from lavis.models.blip2_models.blip2_t5 import Blip2T5 -from lavis.models.blip2_models.blip2_fmr import Blip2FMR -from lavis.models.sevila_models.sevila import SeViLA - -from lavis.models.blip2_models.blip2_qformer import Blip2Qformer -from lavis.models.blip2_models.blip2_image_text_matching import Blip2ITM - -from lavis.models.pnp_vqa_models.pnp_vqa import PNPVQA -from lavis.models.pnp_vqa_models.pnp_unifiedqav2_fid import PNPUnifiedQAv2FiD -from lavis.models.img2prompt_models.img2prompt_vqa import Img2PromptVQA -from lavis.models.med import XBertLMHeadDecoder -from lavis.models.vit import VisionTransformerEncoder -from lavis.models.clip_models.model import CLIP - -from lavis.models.gpt_models.gpt_dialogue import GPTDialogue - -from lavis.processors.base_processor import BaseProcessor - - -__all__ = [ - "load_model", - "AlbefClassification", - "AlbefFeatureExtractor", - "AlbefNLVR", - "AlbefVQA", - "AlbefPretrain", - "AlbefRetrieval", - "AlproQA", - "AlproRetrieval", - "BaseModel", - "BlipBase", - "BlipFeatureExtractor", - "BlipCaption", - "BlipClassification", - "BlipITM", - "BlipNLVR", - "BlipPretrain", - "BlipRetrieval", - "BlipVQA", - "Blip2Qformer", - "Blip2Base", - "Blip2ITM", - "Blip2OPT", - "Blip2T5", - "PNPVQA", - "Img2PromptVQA", - "PNPUnifiedQAv2FiD", - "CLIP", - "VisionTransformerEncoder", - "XBertLMHeadDecoder", - "GPTDialogue", - "Blip2FMR", - "SeViLA", -] - - -def load_model(name, model_type, is_eval=False, device="cpu", checkpoint=None): - """ - Load supported models. - - To list all available models and types in registry: - >>> from lavis.models import model_zoo - >>> print(model_zoo) - - Args: - name (str): name of the model. - model_type (str): type of the model. - is_eval (bool): whether the model is in eval mode. Default: False. - device (str): device to use. Default: "cpu". - checkpoint (str): path or to checkpoint. Default: None. - Note that expecting the checkpoint to have the same keys in state_dict as the model. 
- - Returns: - model (torch.nn.Module): model. - """ - - model = registry.get_model_class(name).from_pretrained(model_type=model_type) - - if checkpoint is not None: - model.load_checkpoint(checkpoint) - - if is_eval: - model.eval() - - if device == "cpu": - model = model.float() - - return model.to(device) - - -def load_preprocess(config): - """ - Load preprocessor configs and construct preprocessors. - - If no preprocessor is specified, return BaseProcessor, which does not do any preprocessing. - - Args: - config (dict): preprocessor configs. - - Returns: - vis_processors (dict): preprocessors for visual inputs. - txt_processors (dict): preprocessors for text inputs. - - Key is "train" or "eval" for processors used in training and evaluation respectively. - """ - - def _build_proc_from_cfg(cfg): - return ( - registry.get_processor_class(cfg.name).from_config(cfg) - if cfg is not None - else BaseProcessor() - ) - - vis_processors = dict() - txt_processors = dict() - - vis_proc_cfg = config.get("vis_processor") - txt_proc_cfg = config.get("text_processor") - - if vis_proc_cfg is not None: - vis_train_cfg = vis_proc_cfg.get("train") - vis_eval_cfg = vis_proc_cfg.get("eval") - else: - vis_train_cfg = None - vis_eval_cfg = None - - vis_processors["train"] = _build_proc_from_cfg(vis_train_cfg) - vis_processors["eval"] = _build_proc_from_cfg(vis_eval_cfg) - - if txt_proc_cfg is not None: - txt_train_cfg = txt_proc_cfg.get("train") - txt_eval_cfg = txt_proc_cfg.get("eval") - else: - txt_train_cfg = None - txt_eval_cfg = None - - txt_processors["train"] = _build_proc_from_cfg(txt_train_cfg) - txt_processors["eval"] = _build_proc_from_cfg(txt_eval_cfg) - - return vis_processors, txt_processors - - -def load_model_and_preprocess(name, model_type, is_eval=False, device="cpu"): - """ - Load model and its related preprocessors. - - List all available models and types in registry: - >>> from lavis.models import model_zoo - >>> print(model_zoo) - - Args: - name (str): name of the model. - model_type (str): type of the model. - is_eval (bool): whether the model is in eval mode. Default: False. - device (str): device to use. Default: "cpu". - - Returns: - model (torch.nn.Module): model. - vis_processors (dict): preprocessors for visual inputs. - txt_processors (dict): preprocessors for text inputs. - """ - model_cls = registry.get_model_class(name) - - # load model - model = model_cls.from_pretrained(model_type=model_type) - - if is_eval: - model.eval() - - # load preprocess - cfg = OmegaConf.load(model_cls.default_config_path(model_type)) - # print(cfg) - if cfg is not None: - preprocess_cfg = cfg.preprocess - - vis_processors, txt_processors = load_preprocess(preprocess_cfg) - else: - vis_processors, txt_processors = None, None - logging.info( - f"""No default preprocess for model {name} ({model_type}). - This can happen if the model is not finetuned on downstream datasets, - or it is not intended for direct use without finetuning. - """ - ) - - if device == "cpu": - model = model.float() - - return model.to(device), vis_processors, txt_processors - - -class ModelZoo: - """ - A utility class to create string representation of available model architectures and types. 
- - >>> from lavis.models import model_zoo - >>> # list all available models - >>> print(model_zoo) - >>> # show total number of models - >>> print(len(model_zoo)) - """ - - def __init__(self) -> None: - self.model_zoo = { - k: list(v.PRETRAINED_MODEL_CONFIG_DICT.keys()) - for k, v in registry.mapping["model_name_mapping"].items() - } - - def __str__(self) -> str: - return ( - "=" * 50 - + "\n" - + f"{'Architectures':<30} {'Types'}\n" - + "=" * 50 - + "\n" - + "\n".join( - [ - f"{name:<30} {', '.join(types)}" - for name, types in self.model_zoo.items() - ] - ) - ) - - def __iter__(self): - return iter(self.model_zoo.items()) - - def __len__(self): - return sum([len(v) for v in self.model_zoo.values()]) - - -model_zoo = ModelZoo() diff --git a/spaces/ServerX/PorcoDiaz/demucs/wav.py b/spaces/ServerX/PorcoDiaz/demucs/wav.py deleted file mode 100644 index a65c3b2ba5aacb1fcab3753f1f85ff7b8db7fc11..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/demucs/wav.py +++ /dev/null @@ -1,174 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from collections import OrderedDict -import hashlib -import math -import json -from pathlib import Path - -import julius -import torch as th -from torch import distributed -import torchaudio as ta -from torch.nn import functional as F - -from .audio import convert_audio_channels -from .compressed import get_musdb_tracks - -MIXTURE = "mixture" -EXT = ".wav" - - -def _track_metadata(track, sources): - track_length = None - track_samplerate = None - for source in sources + [MIXTURE]: - file = track / f"{source}{EXT}" - info = ta.info(str(file)) - length = info.num_frames - if track_length is None: - track_length = length - track_samplerate = info.sample_rate - elif track_length != length: - raise ValueError( - f"Invalid length for file {file}: " - f"expecting {track_length} but got {length}.") - elif info.sample_rate != track_samplerate: - raise ValueError( - f"Invalid sample rate for file {file}: " - f"expecting {track_samplerate} but got {info.sample_rate}.") - if source == MIXTURE: - wav, _ = ta.load(str(file)) - wav = wav.mean(0) - mean = wav.mean().item() - std = wav.std().item() - - return {"length": length, "mean": mean, "std": std, "samplerate": track_samplerate} - - -def _build_metadata(path, sources): - meta = {} - path = Path(path) - for file in path.iterdir(): - meta[file.name] = _track_metadata(file, sources) - return meta - - -class Wavset: - def __init__( - self, - root, metadata, sources, - length=None, stride=None, normalize=True, - samplerate=44100, channels=2): - """ - Waveset (or mp3 set for that matter). Can be used to train - with arbitrary sources. Each track should be one folder inside of `path`. - The folder should contain files named `{source}.{ext}`. - Files will be grouped according to `sources` (each source is a list of - filenames). - - Sample rate and channels will be converted on the fly. - - `length` is the sample size to extract (in samples, not duration). - `stride` is how many samples to move by between each example. 
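        (Illustrative arithmetic: a track of 441000 samples at the target
        sample rate, with `length=220500` and `stride=110250`, yields
        ceil((441000 - 220500) / 110250) + 1 = 3 examples, matching the
        computation in `__init__` below.)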
- """ - self.root = Path(root) - self.metadata = OrderedDict(metadata) - self.length = length - self.stride = stride or length - self.normalize = normalize - self.sources = sources - self.channels = channels - self.samplerate = samplerate - self.num_examples = [] - for name, meta in self.metadata.items(): - track_length = int(self.samplerate * meta['length'] / meta['samplerate']) - if length is None or track_length < length: - examples = 1 - else: - examples = int(math.ceil((track_length - self.length) / self.stride) + 1) - self.num_examples.append(examples) - - def __len__(self): - return sum(self.num_examples) - - def get_file(self, name, source): - return self.root / name / f"{source}{EXT}" - - def __getitem__(self, index): - for name, examples in zip(self.metadata, self.num_examples): - if index >= examples: - index -= examples - continue - meta = self.metadata[name] - num_frames = -1 - offset = 0 - if self.length is not None: - offset = int(math.ceil( - meta['samplerate'] * self.stride * index / self.samplerate)) - num_frames = int(math.ceil( - meta['samplerate'] * self.length / self.samplerate)) - wavs = [] - for source in self.sources: - file = self.get_file(name, source) - wav, _ = ta.load(str(file), frame_offset=offset, num_frames=num_frames) - wav = convert_audio_channels(wav, self.channels) - wavs.append(wav) - - example = th.stack(wavs) - example = julius.resample_frac(example, meta['samplerate'], self.samplerate) - if self.normalize: - example = (example - meta['mean']) / meta['std'] - if self.length: - example = example[..., :self.length] - example = F.pad(example, (0, self.length - example.shape[-1])) - return example - - -def get_wav_datasets(args, samples, sources): - sig = hashlib.sha1(str(args.wav).encode()).hexdigest()[:8] - metadata_file = args.metadata / (sig + ".json") - train_path = args.wav / "train" - valid_path = args.wav / "valid" - if not metadata_file.is_file() and args.rank == 0: - train = _build_metadata(train_path, sources) - valid = _build_metadata(valid_path, sources) - json.dump([train, valid], open(metadata_file, "w")) - if args.world_size > 1: - distributed.barrier() - train, valid = json.load(open(metadata_file)) - train_set = Wavset(train_path, train, sources, - length=samples, stride=args.data_stride, - samplerate=args.samplerate, channels=args.audio_channels, - normalize=args.norm_wav) - valid_set = Wavset(valid_path, valid, [MIXTURE] + sources, - samplerate=args.samplerate, channels=args.audio_channels, - normalize=args.norm_wav) - return train_set, valid_set - - -def get_musdb_wav_datasets(args, samples, sources): - metadata_file = args.metadata / "musdb_wav.json" - root = args.musdb / "train" - if not metadata_file.is_file() and args.rank == 0: - metadata = _build_metadata(root, sources) - json.dump(metadata, open(metadata_file, "w")) - if args.world_size > 1: - distributed.barrier() - metadata = json.load(open(metadata_file)) - - train_tracks = get_musdb_tracks(args.musdb, is_wav=True, subsets=["train"], split="train") - metadata_train = {name: meta for name, meta in metadata.items() if name in train_tracks} - metadata_valid = {name: meta for name, meta in metadata.items() if name not in train_tracks} - train_set = Wavset(root, metadata_train, sources, - length=samples, stride=args.data_stride, - samplerate=args.samplerate, channels=args.audio_channels, - normalize=args.norm_wav) - valid_set = Wavset(root, metadata_valid, [MIXTURE] + sources, - samplerate=args.samplerate, channels=args.audio_channels, - normalize=args.norm_wav) - return 
train_set, valid_set diff --git a/spaces/SmokeAndAsh/4bit-gpt4-x-alpaca-13b-roleplay-lora-4bit-v2/README.md b/spaces/SmokeAndAsh/4bit-gpt4-x-alpaca-13b-roleplay-lora-4bit-v2/README.md deleted file mode 100644 index 0cf14f326afada1990b6e28a7fa14b7f91cf77a9..0000000000000000000000000000000000000000 --- a/spaces/SmokeAndAsh/4bit-gpt4-x-alpaca-13b-roleplay-lora-4bit-v2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: 4bit Gpt4 X Alpaca 13b Roleplay Lora 4bit V2 -emoji: 🐠 -colorFrom: gray -colorTo: green -sdk: gradio -sdk_version: 3.28.3 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Stanlito/Foodvision_mini/model.py b/spaces/Stanlito/Foodvision_mini/model.py deleted file mode 100644 index 2060a8a6ae4f6692cc634c067a876cb1daea285b..0000000000000000000000000000000000000000 --- a/spaces/Stanlito/Foodvision_mini/model.py +++ /dev/null @@ -1,24 +0,0 @@ -import torch -import torchvision - -from torch import nn - -def create_effnetb2_model(num_classes:int=3, # default output classes = 3 (pizza, steak, sushi) - seed:int=42): - # 1, 2, 3 Create EffNetB2 pretrained weights, transforms and model - weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT - transforms = weights.transforms() - model = torchvision.models.efficientnet_b2(weights=weights) - - # 4. Freeze all layers in the base model - for param in model.parameters(): - param.requires_grad = False - - # 5. Change classifier head with random seed for reproducibility - torch.manual_seed(seed) - model.classifier = nn.Sequential( - nn.Dropout(p=0.3, inplace=True), - nn.Linear(in_features=1408, out_features=num_classes) - ) - - return model, transforms diff --git a/spaces/Sumit7864/Image-Enhancer/README.md b/spaces/Sumit7864/Image-Enhancer/README.md deleted file mode 100644 index e4488fabc42789e879c2439582c6e1ea612e5bca..0000000000000000000000000000000000000000 --- a/spaces/Sumit7864/Image-Enhancer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Image Enhancer -emoji: 📸 -colorFrom: purple -colorTo: pink -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Image Enhancer Using Real-ESRGAN diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console.py deleted file mode 100644 index a1221f972839ebf87ce7e540cf378b08e1ee1ba2..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console.py +++ /dev/null @@ -1,97 +0,0 @@ -import sys -from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface - -import traceback - -# Uncomment to force PyDev standard shell. -# raise ImportError() - -from _pydev_bundle.pydev_ipython_console_011 import get_pydev_frontend - - -#======================================================================================================================= -# InterpreterInterface -#======================================================================================================================= -class InterpreterInterface(BaseInterpreterInterface): - ''' - The methods in this class should be registered in the xml-rpc server. 
- ''' - - def __init__(self, host, client_port, main_thread, show_banner=True, connect_status_queue=None): - BaseInterpreterInterface.__init__(self, main_thread, connect_status_queue) - self.client_port = client_port - self.host = host - self.interpreter = get_pydev_frontend(host, client_port) - self._input_error_printed = False - self.notification_succeeded = False - self.notification_tries = 0 - self.notification_max_tries = 3 - self.show_banner = show_banner - - self.notify_about_magic() - - def get_greeting_msg(self): - if self.show_banner: - self.interpreter.show_banner() - return self.interpreter.get_greeting_msg() - - def do_add_exec(self, code_fragment): - self.notify_about_magic() - if code_fragment.text.rstrip().endswith('??'): - print('IPython-->') - try: - res = bool(self.interpreter.add_exec(code_fragment.text)) - finally: - if code_fragment.text.rstrip().endswith('??'): - print('<--IPython') - - return res - - def get_namespace(self): - return self.interpreter.get_namespace() - - def getCompletions(self, text, act_tok): - return self.interpreter.getCompletions(text, act_tok) - - def close(self): - sys.exit(0) - - def notify_about_magic(self): - if not self.notification_succeeded: - self.notification_tries += 1 - if self.notification_tries > self.notification_max_tries: - return - completions = self.getCompletions("%", "%") - magic_commands = [x[0] for x in completions] - - server = self.get_server() - - if server is not None: - try: - server.NotifyAboutMagic(magic_commands, self.interpreter.is_automagic()) - self.notification_succeeded = True - except: - self.notification_succeeded = False - - def get_ipython_hidden_vars_dict(self): - try: - if hasattr(self.interpreter, 'ipython') and hasattr(self.interpreter.ipython, 'user_ns_hidden'): - user_ns_hidden = self.interpreter.ipython.user_ns_hidden - if isinstance(user_ns_hidden, dict): - # Since IPython 2 dict `user_ns_hidden` contains hidden variables and values - user_hidden_dict = user_ns_hidden.copy() - else: - # In IPython 1.x `user_ns_hidden` used to be a set with names of hidden variables - user_hidden_dict = dict([(key, val) for key, val in self.interpreter.ipython.user_ns.items() - if key in user_ns_hidden]) - - # while `_`, `__` and `___` were not initialized, they are not presented in `user_ns_hidden` - user_hidden_dict.setdefault('_', '') - user_hidden_dict.setdefault('__', '') - user_hidden_dict.setdefault('___', '') - - return user_hidden_dict - except: - # Getting IPython variables shouldn't break loading frame variables - traceback.print_exc() - diff --git a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py b/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py deleted file mode 100644 index 94d63d408f18511179d90b3ac6f697385d1e556d..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py +++ /dev/null @@ -1,52 +0,0 @@ -import torch - -import torch.nn as nn -import numpy as np - -from .utils import activations, forward_default, get_activation, Transpose - - -def forward_swin(pretrained, x): - return forward_default(pretrained, x) - - -def _make_swin_backbone( - model, - hooks=[1, 1, 17, 1], - patch_grid=[96, 96] -): - pretrained = nn.Module() - - pretrained.model = model - pretrained.model.layers[0].blocks[hooks[0]].register_forward_hook(get_activation("1")) - 
pretrained.model.layers[1].blocks[hooks[1]].register_forward_hook(get_activation("2")) - pretrained.model.layers[2].blocks[hooks[2]].register_forward_hook(get_activation("3")) - pretrained.model.layers[3].blocks[hooks[3]].register_forward_hook(get_activation("4")) - - pretrained.activations = activations - - if hasattr(model, "patch_grid"): - used_patch_grid = model.patch_grid - else: - used_patch_grid = patch_grid - - patch_grid_size = np.array(used_patch_grid, dtype=int) - - pretrained.act_postprocess1 = nn.Sequential( - Transpose(1, 2), - nn.Unflatten(2, torch.Size(patch_grid_size.tolist())) - ) - pretrained.act_postprocess2 = nn.Sequential( - Transpose(1, 2), - nn.Unflatten(2, torch.Size((patch_grid_size // 2).tolist())) - ) - pretrained.act_postprocess3 = nn.Sequential( - Transpose(1, 2), - nn.Unflatten(2, torch.Size((patch_grid_size // 4).tolist())) - ) - pretrained.act_postprocess4 = nn.Sequential( - Transpose(1, 2), - nn.Unflatten(2, torch.Size((patch_grid_size // 8).tolist())) - ) - - return pretrained diff --git a/spaces/TEnngal/bingo/src/components/ui/badge.tsx b/spaces/TEnngal/bingo/src/components/ui/badge.tsx deleted file mode 100644 index d9a84b394090e5b4b3bd34f6135b9a2f2ead0aa2..0000000000000000000000000000000000000000 --- a/spaces/TEnngal/bingo/src/components/ui/badge.tsx +++ /dev/null @@ -1,36 +0,0 @@ -import * as React from 'react' -import { cva, type VariantProps } from 'class-variance-authority' - -import { cn } from '@/lib/utils' - -const badgeVariants = cva( - 'inline-flex items-center rounded-full border px-2.5 py-0.5 text-xs font-semibold transition-colors focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2', - { - variants: { - variant: { - default: - 'border-transparent bg-primary text-primary-foreground hover:bg-primary/80', - secondary: - 'border-transparent bg-secondary text-secondary-foreground hover:bg-secondary/80', - destructive: - 'border-transparent bg-destructive text-destructive-foreground hover:bg-destructive/80', - outline: 'text-foreground' - } - }, - defaultVariants: { - variant: 'default' - } - } -) - -export interface BadgeProps - extends React.HTMLAttributes, - VariantProps {} - -function Badge({ className, variant, ...props }: BadgeProps) { - return ( -
    - ) -} - -export { Badge, badgeVariants } diff --git a/spaces/TH5314/newbing/src/components/learn-more.tsx b/spaces/TH5314/newbing/src/components/learn-more.tsx deleted file mode 100644 index a64459ee7900a612292e117a6bda96ee9260990f..0000000000000000000000000000000000000000 --- a/spaces/TH5314/newbing/src/components/learn-more.tsx +++ /dev/null @@ -1,39 +0,0 @@ -import React from 'react' -import { SourceAttribution } from '@/lib/bots/bing/types' - -export interface LearnMoreProps { - sourceAttributions?: SourceAttribution[] -} - -export function LearnMore({ sourceAttributions }: LearnMoreProps) { - if (!sourceAttributions?.length) { - return null - } - - return ( - - ) -} diff --git a/spaces/Terminus0501/vits-uma-genshin-honkai/models.py b/spaces/Terminus0501/vits-uma-genshin-honkai/models.py deleted file mode 100644 index 52e15d1b9775038fd6e82b2efe6f95f51c66802d..0000000000000000000000000000000000000000 --- a/spaces/Terminus0501/vits-uma-genshin-honkai/models.py +++ /dev/null @@ -1,534 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - 
logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows 
= nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - 
self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = 
resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - device = next(self.parameters()).device # 获取模型所在的设备 - x, m_p, logs_p, x_mask = self.enc_p(x.to(device), x_lengths.to(device)) - if self.n_speakers > 0: - g = self.emb_g(sid.to(device)).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = 
torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/TexR6/AttentionMaps/README.md b/spaces/TexR6/AttentionMaps/README.md deleted file mode 100644 index 17f7673083f8da5dd8154ea90d5e0aab50fb2f54..0000000000000000000000000000000000000000 --- a/spaces/TexR6/AttentionMaps/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: ImageNet Attention Maps -emoji: 👀 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Theivaprakasham/facedetect/README.md b/spaces/Theivaprakasham/facedetect/README.md deleted file mode 100644 index 26d880081cf4f44d125af51331e5ab7a9b114c5e..0000000000000000000000000000000000000000 --- a/spaces/Theivaprakasham/facedetect/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Multi Face Detection -emoji: 🌖 -colorFrom: yellow -colorTo: gray -sdk: gradio -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/Toor1989/Toor1989/README.md b/spaces/Toor1989/Toor1989/README.md deleted file mode 100644 index ff5010b4b79d50c50593a5bf1b96e8cd5d0b7339..0000000000000000000000000000000000000000 --- a/spaces/Toor1989/Toor1989/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Toor1989 -emoji: 🔥 -colorFrom: blue -colorTo: purple -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false -license: other ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/UMich-siads699-fa22-spotamood/spotamood/pages/songs_attributes.md b/spaces/UMich-siads699-fa22-spotamood/spotamood/pages/songs_attributes.md deleted file mode 100644 index 67a1a21efff4b8fae0ffe3a503d74ac380d8dd71..0000000000000000000000000000000000000000 --- a/spaces/UMich-siads699-fa22-spotamood/spotamood/pages/songs_attributes.md +++ /dev/null @@ -1,5 +0,0 @@ -#### Songs Attributes - -With the data scraped from the Spotify API based on the list of songs from Wasabi Songs dataset, approximately 2 million songs, were filtered by the popularity of the artist from billboard's top 100 since 1948. We look at the attributes of the song including danceability, energy, speechiness, acousticness, liveness, and valence. The distribution of these properties shows quite a good distribution of the data and the songs that we have for our model. Although speechiness and liveness tend to be on the lower side, it does not really impact our songs distribution of mood range that much as the liveness parameter is the detection of the presence of the audience in the audio, and speechiness represents the spoken words that are not melodic in the audio. These 2 do not seem to impact the range of mood that much versus danceability, energy, acousticness, and valence which distributes quite well in the middle. Hence, it can be safe to assume that we can get a normally distributed songs' attribute if we sample from the dataset for our model representation. - -Moreover, the distribution of scatter plots in the pairplots among the attributes in the chart below shows that there are no significant correlations between attributes. This means that songs attribute will not present multicollinearity issues in our lyrics clustering analysis. 
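
As a rough sketch of the checks described above (a minimal illustration only: it assumes the filtered attributes were exported to a hypothetical `songs_attributes.csv` and that pandas/seaborn are available, neither of which is stated in the original analysis), the distribution and correlation inspection could look like this:

```python
# Illustrative sketch: inspect Spotify audio-feature distributions and
# pairwise correlations before clustering. File name and columns are assumed.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

FEATURES = ["danceability", "energy", "speechiness",
            "acousticness", "liveness", "valence"]

df = pd.read_csv("songs_attributes.csv")  # hypothetical export of the filtered songs

# Per-attribute distributions (speechiness and liveness are expected to skew low).
df[FEATURES].hist(bins=30, figsize=(10, 6))
plt.tight_layout()
plt.show()

# Pairwise scatter plots plus the correlation matrix; weak off-diagonal values
# support the "no multicollinearity" conclusion for the clustering step.
sns.pairplot(df[FEATURES].sample(n=min(len(df), 5000), random_state=0), corner=True)
plt.show()
print(df[FEATURES].corr().round(2))
```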
\ No newline at end of file diff --git a/spaces/VaneM/ChatBot-Text-to-Speach-es/app.py b/spaces/VaneM/ChatBot-Text-to-Speach-es/app.py deleted file mode 100644 index bfa88da306e0d66b5963f7196616bb0f7d562edc..0000000000000000000000000000000000000000 --- a/spaces/VaneM/ChatBot-Text-to-Speach-es/app.py +++ /dev/null @@ -1,89 +0,0 @@ -import gradio as gr -from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration, pipeline -import torch -from gtts import gTTS - - -# Cargamos el modelo para el chat -model_name = 'facebook/blenderbot-400M-distill' -tokenizer = BlenderbotTokenizer.from_pretrained(model_name) -model = BlenderbotForConditionalGeneration.from_pretrained(model_name) - - -# Cargamos el traductor de ingles a español -english_model_name = "Helsinki-NLP/opus-mt-en-es" -translator_en_es = pipeline("translation", model=english_model_name) - -# Cargamos el traductor de español a ingles -spanish_model_name = "Helsinki-NLP/opus-mt-es-en" -translator_es_en = pipeline("translation", model=spanish_model_name) - - -def take_last_tokens(inputs, note_history, history): - """Filtrar los últimos 128 tokens""" - if inputs['input_ids'].shape[1] > 128: - inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()]) - inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()]) - note_history = [' '.join(note_history[0].split(' ')[2:])] - history = history[1:] - return inputs, note_history, history - -def add_note_to_history(note, note_history): - """Añadir una nota a la información histórica del chat""" - note_history.append(note) - note_history = ' '.join(note_history) - return [note_history] - - -def predict(text, history): - history = history or [] - if history: - history_useful = [' '.join([str(a[0])+' '+str(a[1]) for a in history])] - else: - history_useful = [] - # Traducimos el texto ingresado a ingles - text_input = translator_es_en(text)[0]['translation_text'] - - # comparamos con el historial y codificamos la nueva entrada del usuario - history_useful = add_note_to_history(text_input, history_useful) - inputs = tokenizer(history_useful, return_tensors="pt") - inputs, history_useful, history = take_last_tokens(inputs, history_useful, history) - - # Generar una respuesta - reply_ids = model.generate(**inputs) - response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0] - # sumamos la respuesta al historial del chat - history_useful = add_note_to_history(response, history_useful) - list_history = history_useful[0].split(' ') - history.append((list_history[-2], list_history[-1])) - - # pasamos el resultado a gtts para obtener el audio - spanish_text = translator_en_es(response) - result_es = spanish_text[0]['translation_text'] - sound_file = 'output.wav' - tts = gTTS(result_es, lang="es", tld='com.mx') - tts.save(sound_file) - return sound_file, history - -description = """ -

    Inicia el chat con la IA que ha sido entrenada para hablar contigo sobre lo que quieras.

    -

    ¡Hablemos!

    -""" -article = """Instrucciones: -\n1. Inserte el texto en la casilla de texto -\n2. Presionar 'Enviar' y esperar la respuesta -\n4. Para enviar otro texto borrar el actual y volver al punto 1. - -El modelo usa: -- Modelo conversacional [facebook/blenderbot-400M-distill](https://huggingface.co/facebook/blenderbot-400M-distill?text=Hey+my+name+is+Julien%21+How+are+you%3F), -- Para las traducciones [Helsinki-NLP](https://huggingface.co/Helsinki-NLP) -- Para la respuesta de voz [gTTS](https://pypi.org/project/gTTS/) -\n... y mucha magia ☺ -""" - -gr.Interface(fn=predict, - title="ChatBot Text-to-Speach en Español", - inputs= [gr.Textbox("", max_lines = 5, label = "Inserte su texto aqui") , 'state'], - outputs = [gr.Audio(type='file', label="Respuesta de IA en forma de audio"), 'state'], - description = description , - article = article).launch(debug=True) \ No newline at end of file diff --git a/spaces/WindVChen/INR-Harmon/inference.py b/spaces/WindVChen/INR-Harmon/inference.py deleted file mode 100644 index bb689c81a120247bfa9afa6b217b87b25bc0de6a..0000000000000000000000000000000000000000 --- a/spaces/WindVChen/INR-Harmon/inference.py +++ /dev/null @@ -1,236 +0,0 @@ -import os -import argparse - -import albumentations -from albumentations import Resize - -import torch -import torch.backends.cudnn as cudnn -import torchvision.transforms as transforms -from torch.utils.data import DataLoader - -from model.build_model import build_model -from datasets.build_dataset import dataset_generator - -from utils import misc, metrics - - -def parse_args(): - parser = argparse.ArgumentParser() - - parser.add_argument('--workers', type=int, default=1, - metavar='N', help='Dataloader threads.') - - parser.add_argument('--batch_size', type=int, default=1, - help='You can override model batch size by specify positive number.') - - parser.add_argument('--device', type=str, default='cuda', - help="Whether use cuda, 'cuda' or 'cpu'.") - - parser.add_argument('--save_path', type=str, default="./logs", - help='Where to save logs and checkpoints.') - - parser.add_argument('--dataset_path', type=str, default=r".\iHarmony4", - help='Dataset path.') - - parser.add_argument('--base_size', type=int, default=256, - help='Base size. Resolution of the image input into the Encoder') - - parser.add_argument('--input_size', type=int, default=256, - help='Input size. Resolution of the image that want to be generated by the Decoder') - - parser.add_argument('--INR_input_size', type=int, default=256, - help='INR input size. Resolution of the image that want to be generated by the Decoder. ' - 'Should be the same as `input_size`') - - parser.add_argument('--INR_MLP_dim', type=int, default=32, - help='Number of channels for INR linear layer.') - - parser.add_argument('--LUT_dim', type=int, default=7, - help='Dim of the output LUT. Refer to https://ieeexplore.ieee.org/abstract/document/9206076') - - parser.add_argument('--activation', type=str, default='leakyrelu_pe', - help='INR activation layer type: leakyrelu_pe, sine') - - parser.add_argument('--pretrained', type=str, - default=r'.\pretrained_models\Resolution_RAW_iHarmony4.pth', - help='Pretrained weight path') - - parser.add_argument('--param_factorize_dim', type=int, - default=10, - help='The intermediate dimensions of the factorization of the predicted MLP parameters. 
' - 'Refer to https://arxiv.org/abs/2011.12026') - - parser.add_argument('--embedding_type', type=str, - default="CIPS_embed", - help='Which embedding_type to use.') - - parser.add_argument('--optim', type=str, - default='adamw', - help='Which optimizer to use.') - - parser.add_argument('--INRDecode', action="store_false", - help='Whether INR decoder. Set it to False if you want to test the baseline ' - '(https://github.com/SamsungLabs/image_harmonization)') - - parser.add_argument('--isMoreINRInput', action="store_false", - help='Whether to cat RGB and mask. See Section 3.4 in the paper.') - - parser.add_argument('--hr_train', action="store_true", - help='Whether use hr_train. See section 3.4 in the paper.') - - parser.add_argument('--isFullRes', action="store_true", - help='Whether for original resolution. See section 3.4 in the paper.') - - opt = parser.parse_args() - - opt.save_path = misc.increment_path(os.path.join(opt.save_path, "test1")) - - return opt - - -def inference(val_loader, model, logger, opt): - current_process = 10 - model.eval() - - metric_log = { - 'HAdobe5k': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - 'HCOCO': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - 'Hday2night': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - 'HFlickr': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - 'All': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - } - - lut_metric_log = { - 'HAdobe5k': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - 'HCOCO': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - 'Hday2night': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - 'HFlickr': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - 'All': {'Samples': 0, 'MSE': 0, 'fMSE': 0, 'PSNR': 0, 'SSIM': 0}, - } - - for step, batch in enumerate(val_loader): - composite_image = batch['composite_image'].to(opt.device) - real_image = batch['real_image'].to(opt.device) - mask = batch['mask'].to(opt.device) - category = batch['category'] - - fg_INR_coordinates = batch['fg_INR_coordinates'].to(opt.device) - - with torch.no_grad(): - fg_content_bg_appearance_construct, _, lut_transform_image = model( - composite_image, - mask, - fg_INR_coordinates, - ) - - if opt.INRDecode: - pred_fg_image = fg_content_bg_appearance_construct[-1] - else: - pred_fg_image = misc.lin2img(fg_content_bg_appearance_construct, - val_loader.dataset.INR_dataset.size) if fg_content_bg_appearance_construct is not None else None - - if not opt.INRDecode: - pred_harmonized_image = None - else: - pred_harmonized_image = pred_fg_image * (mask > 100 / 255.) + real_image * (~(mask > 100 / 255.)) - lut_transform_image = lut_transform_image * (mask > 100 / 255.) 
+ real_image * (~(mask > 100 / 255.)) - - misc.visualize(real_image, composite_image, mask, pred_fg_image, - pred_harmonized_image, lut_transform_image, opt, -1, show=False, - wandb=False, isAll=True, step=step) - - if opt.INRDecode: - mse, fmse, psnr, ssim = metrics.calc_metrics(misc.normalize(pred_harmonized_image, opt, 'inv'), - misc.normalize(real_image, opt, 'inv'), mask) - - lut_mse, lut_fmse, lut_psnr, lut_ssim = metrics.calc_metrics(misc.normalize(lut_transform_image, opt, 'inv'), - misc.normalize(real_image, opt, 'inv'), mask) - - for idx in range(len(category)): - if opt.INRDecode: - metric_log[category[idx]]['Samples'] += 1 - metric_log[category[idx]]['MSE'] += mse[idx] - metric_log[category[idx]]['fMSE'] += fmse[idx] - metric_log[category[idx]]['PSNR'] += psnr[idx] - metric_log[category[idx]]['SSIM'] += ssim[idx] - - metric_log['All']['Samples'] += 1 - metric_log['All']['MSE'] += mse[idx] - metric_log['All']['fMSE'] += fmse[idx] - metric_log['All']['PSNR'] += psnr[idx] - metric_log['All']['SSIM'] += ssim[idx] - - lut_metric_log[category[idx]]['Samples'] += 1 - lut_metric_log[category[idx]]['MSE'] += lut_mse[idx] - lut_metric_log[category[idx]]['fMSE'] += lut_fmse[idx] - lut_metric_log[category[idx]]['PSNR'] += lut_psnr[idx] - lut_metric_log[category[idx]]['SSIM'] += lut_ssim[idx] - - lut_metric_log['All']['Samples'] += 1 - lut_metric_log['All']['MSE'] += lut_mse[idx] - lut_metric_log['All']['fMSE'] += lut_fmse[idx] - lut_metric_log['All']['PSNR'] += lut_psnr[idx] - lut_metric_log['All']['SSIM'] += lut_ssim[idx] - - if (step + 1) / len(val_loader) * 100 >= current_process: - logger.info(f'Processing: {current_process}') - current_process += 10 - - logger.info('=========================') - for key in metric_log.keys(): - if opt.INRDecode: - msg = f"{key}-'MSE': {metric_log[key]['MSE'] / metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'fMSE': {metric_log[key]['fMSE'] / metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'PSNR': {metric_log[key]['PSNR'] / metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'SSIM': {metric_log[key]['SSIM'] / metric_log[key]['Samples']:.4f}\n" \ - f"{key}-'LUT_MSE': {lut_metric_log[key]['MSE'] / lut_metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'LUT_fMSE': {lut_metric_log[key]['fMSE'] / lut_metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'LUT_PSNR': {lut_metric_log[key]['PSNR'] / lut_metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'LUT_SSIM': {lut_metric_log[key]['SSIM'] / lut_metric_log[key]['Samples']:.4f}\n" - else: - msg = f"{key}-'LUT_MSE': {lut_metric_log[key]['MSE'] / lut_metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'LUT_fMSE': {lut_metric_log[key]['fMSE'] / lut_metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'LUT_PSNR': {lut_metric_log[key]['PSNR'] / lut_metric_log[key]['Samples']:.2f}\n" \ - f"{key}-'LUT_SSIM': {lut_metric_log[key]['SSIM'] / lut_metric_log[key]['Samples']:.4f}\n" - - logger.info(msg) - - logger.info('=========================') - - -def main_process(opt): - logger = misc.create_logger(os.path.join(opt.save_path, "log.txt")) - cudnn.benchmark = True - - valset_path = os.path.join(opt.dataset_path, "IHD_test.txt") - - opt.transform_mean = [.5, .5, .5] - opt.transform_var = [.5, .5, .5] - torch_transform = transforms.Compose([transforms.ToTensor(), - transforms.Normalize(opt.transform_mean, opt.transform_var)]) - - valset_alb_transform = albumentations.Compose([Resize(opt.input_size, opt.input_size)], - additional_targets={'real_image': 'image', 'object_mask': 'image'}) - - valset = dataset_generator(valset_path, valset_alb_transform, 
torch_transform, opt, mode='Val') - - val_loader = DataLoader(valset, opt.batch_size, shuffle=False, drop_last=False, pin_memory=True, - num_workers=opt.workers, persistent_workers=True) - - model = build_model(opt).to(opt.device) - logger.info(f"Load pretrained weight from {opt.pretrained}") - - load_dict = torch.load(opt.pretrained)['model'] - for k in load_dict.keys(): - if k not in model.state_dict().keys(): - print(f"Skip {k}") - model.load_state_dict(load_dict, strict=False) - - inference(val_loader, model, logger, opt) - - -if __name__ == '__main__': - opt = parse_args() - os.makedirs(opt.save_path, exist_ok=True) - main_process(opt) \ No newline at end of file diff --git a/spaces/Xhaheen/Hyper_Bot_ben/README.md b/spaces/Xhaheen/Hyper_Bot_ben/README.md deleted file mode 100644 index e809ba3833083e4d9abe7d5259950f682a8a9cae..0000000000000000000000000000000000000000 --- a/spaces/Xhaheen/Hyper_Bot_ben/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Hyper Bot -emoji: 🤖 -colorFrom: gray -colorTo: yellow -sdk: static -pinned: false -duplicated_from: CobaltZvc/Hyper_Bot ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/XzJosh/Azuma-Bert-VITS2/text/__init__.py b/spaces/XzJosh/Azuma-Bert-VITS2/text/__init__.py deleted file mode 100644 index 7566bf351ca9b95af9cdc6d729557a9da083800f..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Azuma-Bert-VITS2/text/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from text.symbols import * - - -_symbol_to_id = {s: i for i, s in enumerate(symbols)} - -def cleaned_text_to_sequence(cleaned_text, tones, language): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - phones = [_symbol_to_id[symbol] for symbol in cleaned_text] - tone_start = language_tone_start_map[language] - tones = [i + tone_start for i in tones] - lang_id = language_id_map[language] - lang_ids = [lang_id for i in phones] - return phones, tones, lang_ids - -def get_bert(norm_text, word2ph, language): - from .chinese_bert import get_bert_feature as zh_bert - from .english_bert_mock import get_bert_feature as en_bert - lang_bert_func_map = { - 'ZH': zh_bert, - 'EN': en_bert - } - bert = lang_bert_func_map[language](norm_text, word2ph) - return bert diff --git a/spaces/XzJosh/Nana7mi-Bert-VITS2/commons.py b/spaces/XzJosh/Nana7mi-Bert-VITS2/commons.py deleted file mode 100644 index 9ad0444b61cbadaa388619986c2889c707d873ce..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Nana7mi-Bert-VITS2/commons.py +++ /dev/null @@ -1,161 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
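        # a single Tensor is wrapped in a list so the filtering and clipping loop below handles both input forms uniformly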
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/XzJosh/Spade-Bert-VITS2/text/english.py b/spaces/XzJosh/Spade-Bert-VITS2/text/english.py deleted file mode 100644 index 781d0a56cef71f66fc67db51d76538be90d3ddd2..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Spade-Bert-VITS2/text/english.py +++ /dev/null @@ -1,138 +0,0 @@ -import pickle -import os -import re -from g2p_en import G2p -from string import punctuation - -from text import symbols - -current_file_path = os.path.dirname(__file__) -CMU_DICT_PATH = os.path.join(current_file_path, 'cmudict.rep') -CACHE_PATH = os.path.join(current_file_path, 'cmudict_cache.pickle') -_g2p = G2p() - -arpa = {'AH0', 'S', 'AH1', 'EY2', 'AE2', 'EH0', 'OW2', 'UH0', 'NG', 'B', 'G', 'AY0', 'M', 'AA0', 'F', 'AO0', 'ER2', 'UH1', 'IY1', 'AH2', 'DH', 'IY0', 'EY1', 'IH0', 'K', 'N', 'W', 'IY2', 'T', 'AA1', 'ER1', 'EH2', 'OY0', 'UH2', 'UW1', 'Z', 'AW2', 'AW1', 'V', 'UW2', 'AA2', 'ER', 'AW0', 'UW0', 'R', 'OW1', 'EH1', 'ZH', 'AE0', 'IH2', 'IH', 'Y', 'JH', 'P', 'AY1', 'EY0', 'OY2', 'TH', 'HH', 'D', 'ER0', 'CH', 'AO1', 'AE1', 'AO2', 'OY1', 'AY2', 'IH1', 'OW0', 'L', 'SH'} - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def read_dict(): - g2p_dict = {} - start_line = 49 - with open(CMU_DICT_PATH) as f: - line = f.readline() - line_index = 1 - while line: - if line_index >= start_line: - line = line.strip() - word_split = line.split(' ') - word = word_split[0] - - syllable_split = word_split[1].split(' - ') - g2p_dict[word] = [] - for syllable in syllable_split: - phone_split = syllable.split(' ') - g2p_dict[word].append(phone_split) - - line_index = line_index + 1 - line = f.readline() - - return g2p_dict - - -def cache_dict(g2p_dict, file_path): - with open(file_path, 'wb') as pickle_file: - pickle.dump(g2p_dict, pickle_file) - - -def get_dict(): - if os.path.exists(CACHE_PATH): - with open(CACHE_PATH, 'rb') as pickle_file: - g2p_dict = pickle.load(pickle_file) - else: - g2p_dict = read_dict() - cache_dict(g2p_dict, CACHE_PATH) - - return g2p_dict - -eng_dict = get_dict() - -def refine_ph(phn): - tone = 0 - if re.search(r'\d$', phn): - tone = int(phn[-1]) + 1 - phn = phn[:-1] - return phn.lower(), tone - -def refine_syllables(syllables): - tones = [] - phonemes = [] - for phn_list in syllables: - for i in range(len(phn_list)): - phn = phn_list[i] - phn, tone = refine_ph(phn) - phonemes.append(phn) - tones.append(tone) - return phonemes, tones - - -def text_normalize(text): - # todo: eng text normalize - return text - -def g2p(text): - - phones = [] - tones = [] - words = re.split(r"([,;.\-\?\!\s+])", text) - for w in words: - if w.upper() in eng_dict: - phns, tns = refine_syllables(eng_dict[w.upper()]) - phones += phns - tones += tns - else: - phone_list = list(filter(lambda p: p != " ", _g2p(w))) - for ph in phone_list: - if ph in arpa: - ph, tn = refine_ph(ph) - 
phones.append(ph) - tones.append(tn) - else: - phones.append(ph) - tones.append(0) - # todo: implement word2ph - word2ph = [1 for i in phones] - - phones = [post_replace_ph(i) for i in phones] - return phones, tones, word2ph - -if __name__ == "__main__": - # print(get_dict()) - # print(eng_word_to_phoneme("hello")) - print(g2p("In this paper, we propose 1 DSPGAN, a GAN-based universal vocoder.")) - # all_phones = set() - # for k, syllables in eng_dict.items(): - # for group in syllables: - # for ph in group: - # all_phones.add(ph) - # print(all_phones) \ No newline at end of file diff --git a/spaces/XzJosh/nine1-Bert-VITS2/text/__init__.py b/spaces/XzJosh/nine1-Bert-VITS2/text/__init__.py deleted file mode 100644 index 7566bf351ca9b95af9cdc6d729557a9da083800f..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nine1-Bert-VITS2/text/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from text.symbols import * - - -_symbol_to_id = {s: i for i, s in enumerate(symbols)} - -def cleaned_text_to_sequence(cleaned_text, tones, language): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - phones = [_symbol_to_id[symbol] for symbol in cleaned_text] - tone_start = language_tone_start_map[language] - tones = [i + tone_start for i in tones] - lang_id = language_id_map[language] - lang_ids = [lang_id for i in phones] - return phones, tones, lang_ids - -def get_bert(norm_text, word2ph, language): - from .chinese_bert import get_bert_feature as zh_bert - from .english_bert_mock import get_bert_feature as en_bert - lang_bert_func_map = { - 'ZH': zh_bert, - 'EN': en_bert - } - bert = lang_bert_func_map[language](norm_text, word2ph) - return bert diff --git a/spaces/XzJosh/nine1-Bert-VITS2/text/english_bert_mock.py b/spaces/XzJosh/nine1-Bert-VITS2/text/english_bert_mock.py deleted file mode 100644 index 3b894ced5b6d619a18d6bdd7d7606ba9e6532050..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/nine1-Bert-VITS2/text/english_bert_mock.py +++ /dev/null @@ -1,5 +0,0 @@ -import torch - - -def get_bert_feature(norm_text, word2ph): - return torch.zeros(1024, sum(word2ph)) diff --git a/spaces/YUANAI/DiffspeechResearch/tasks/tts/diffspeech.py b/spaces/YUANAI/DiffspeechResearch/tasks/tts/diffspeech.py deleted file mode 100644 index 283bf9b62fed0c5f68a9f82887543b9413dd8955..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/tasks/tts/diffspeech.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch - -from modules.tts.diffspeech.shallow_diffusion_tts import GaussianDiffusion -from tasks.tts.fs2_orig import FastSpeech2OrigTask - -import utils -from utils.commons.hparams import hparams -from utils.commons.ckpt_utils import load_ckpt -from utils.audio.pitch.utils import denorm_f0 - - -class DiffSpeechTask(FastSpeech2OrigTask): - def build_tts_model(self): - # get min and max - # import torch - # from tqdm import tqdm - # v_min = torch.ones([80]) * 100 - # v_max = torch.ones([80]) * -100 - # for i, ds in enumerate(tqdm(self.dataset_cls('train'))): - # v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max) - # v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min) - # if i % 100 == 0: - # print(i, v_min, v_max) - # print('final', v_min, v_max) - dict_size = len(self.token_encoder) - self.model = GaussianDiffusion(dict_size, hparams) - if hparams['fs2_ckpt'] != '': - load_ckpt(self.model.fs2, 
hparams['fs2_ckpt'], 'model', strict=True) - # for k, v in self.model.fs2.named_parameters(): - # if 'predictor' not in k: - # v.requires_grad = False - # or - for k, v in self.model.fs2.named_parameters(): - v.requires_grad = False - - def build_optimizer(self, model): - self.optimizer = optimizer = torch.optim.AdamW( - filter(lambda p: p.requires_grad, model.parameters()), - lr=hparams['lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - weight_decay=hparams['weight_decay']) - return optimizer - - def build_scheduler(self, optimizer): - return torch.optim.lr_scheduler.StepLR(optimizer, hparams['decay_steps'], gamma=0.5) - - def run_model(self, sample, infer=False, *args, **kwargs): - txt_tokens = sample['txt_tokens'] # [B, T_t] - spk_embed = sample.get('spk_embed') - spk_id = sample.get('spk_ids') - if not infer: - target = sample['mels'] # [B, T_s, 80] - mel2ph = sample['mel2ph'] # [B, T_s] - f0 = sample.get('f0') - uv = sample.get('uv') - output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, - ref_mels=target, f0=f0, uv=uv, infer=False) - losses = {} - if 'diff_loss' in output: - losses['mel'] = output['diff_loss'] - self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) - if hparams['use_pitch_embed']: - self.add_pitch_loss(output, sample, losses) - return losses, output - else: - use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) - use_gt_f0 = kwargs.get('infer_use_gt_f0', hparams['use_gt_f0']) - mel2ph, uv, f0 = None, None, None - if use_gt_dur: - mel2ph = sample['mel2ph'] - if use_gt_f0: - f0 = sample['f0'] - uv = sample['uv'] - output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, - ref_mels=None, f0=f0, uv=uv, infer=True) - return output - - def save_valid_result(self, sample, batch_idx, model_out): - sr = hparams['audio_sample_rate'] - f0_gt = None - # mel_out = model_out['mel_out'] - if sample.get('f0') is not None: - f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) - # self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) - if self.global_step > 0: - # wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) - # self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) - # with gt duration - model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) - dur_info = self.get_plot_dur_info(sample, model_out) - del dur_info['dur_pred'] - wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) - self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'diffmel_gdur_{batch_idx}', - dur_info=dur_info, f0s=f0_gt) - self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'][0], f'fs2mel_gdur_{batch_idx}', - dur_info=dur_info, f0s=f0_gt) # gt mel vs. 
fs2 mel - - # with pred duration - if not hparams['use_gt_dur']: - model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) - dur_info = self.get_plot_dur_info(sample, model_out) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', - dur_info=dur_info, f0s=f0_gt) - wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) - self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) - # gt wav - if self.global_step <= hparams['valid_infer_interval']: - mel_gt = sample['mels'][0].cpu() - wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) - self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) diff --git a/spaces/YazawaSunrise/so-vits-svc-LoveLive/data_utils.py b/spaces/YazawaSunrise/so-vits-svc-LoveLive/data_utils.py deleted file mode 100644 index 1f5d4e257116871588720f296ad78f0bc31e7b24..0000000000000000000000000000000000000000 --- a/spaces/YazawaSunrise/so-vits-svc-LoveLive/data_utils.py +++ /dev/null @@ -1,154 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import commons -from mel_processing import spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text, transform - -# import h5py - - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths, hparams): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.spec_len = hparams.train.max_speclen - self.spk_map = hparams.spk - - random.seed(1234) - random.shuffle(self.audiopaths) - - def get_audio(self, filename): - filename = filename.replace("\\", "/") - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split("/")[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - c = torch.load(filename + ".soft.pt").squeeze(0) - c = torch.repeat_interleave(c, repeats=2, dim=1) - - f0 = np.load(filename + ".f0.npy") - f0 = torch.FloatTensor(f0) - lmin = min(c.size(-1), spec.size(-1), f0.shape[0]) - assert abs(c.size(-1) - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape, filename) - assert abs(lmin - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - assert abs(lmin - c.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - spec, c, f0 = spec[:, :lmin], c[:, :lmin], f0[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - _spec, _c, _audio_norm, _f0 = spec, c, audio_norm, f0 - while spec.size(-1) < 
self.spec_len: - spec = torch.cat((spec, _spec), -1) - c = torch.cat((c, _c), -1) - f0 = torch.cat((f0, _f0), -1) - audio_norm = torch.cat((audio_norm, _audio_norm), -1) - start = random.randint(0, spec.size(-1) - self.spec_len) - end = start + self.spec_len - spec = spec[:, start:end] - c = c[:, start:end] - f0 = f0[start:end] - audio_norm = audio_norm[:, start * self.hop_length:end * self.hop_length] - - return c, f0, spec, audio_norm, spk - - def __getitem__(self, index): - return self.get_audio(self.audiopaths[index][0]) - - def __len__(self): - return len(self.audiopaths) - - -class EvalDataLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - - def __init__(self, audiopaths, hparams): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.audiopaths = self.audiopaths[:5] - self.spk_map = hparams.spk - - - def get_audio(self, filename): - filename = filename.replace("\\", "/") - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split("/")[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - c = torch.load(filename + ".soft.pt").squeeze(0) - - c = torch.repeat_interleave(c, repeats=2, dim=1) - - f0 = np.load(filename + ".f0.npy") - f0 = torch.FloatTensor(f0) - lmin = min(c.size(-1), spec.size(-1), f0.shape[0]) - assert abs(c.size(-1) - spec.size(-1)) < 4, (c.size(-1), spec.size(-1), f0.shape) - assert abs(f0.shape[0] - spec.shape[-1]) < 4, (c.size(-1), spec.size(-1), f0.shape) - spec, c, f0 = spec[:, :lmin], c[:, :lmin], f0[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - - return c, f0, spec, audio_norm, spk - - def __getitem__(self, index): - return self.get_audio(self.audiopaths[index][0]) - - def __len__(self): - return len(self.audiopaths) - diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/latent_diffusion_uncond/__init__.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/latent_diffusion_uncond/__init__.py deleted file mode 100644 index 0826ca7536c706f9bc1f310c157068efbca7f0b3..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/latent_diffusion_uncond/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# flake8: noqa -from .pipeline_latent_diffusion_uncond import LDMPipeline diff --git a/spaces/Yuliang/ICON/lib/pymaf/utils/__init__.py b/spaces/Yuliang/ICON/lib/pymaf/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git 
a/spaces/ZJunTvT/ZJunChat/locale/extract_locale.py b/spaces/ZJunTvT/ZJunChat/locale/extract_locale.py deleted file mode 100644 index 32b0924bd6dffe150cb3e481ddadef836b91b83c..0000000000000000000000000000000000000000 --- a/spaces/ZJunTvT/ZJunChat/locale/extract_locale.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import json -import re - -# Define regular expression patterns -pattern = r'i18n\((\"{3}.*?\"{3}|\".*?\")\)' - -# Load the .py file -with open('ChuanhuChatbot.py', 'r', encoding='utf-8') as f: - contents = f.read() - -# Load the .py files in the modules folder -for filename in os.listdir("modules"): - if filename.endswith(".py"): - with open(os.path.join("modules", filename), "r", encoding="utf-8") as f: - contents += f.read() - -# Matching with regular expressions -matches = re.findall(pattern, contents, re.DOTALL) - -# Convert to key/value pairs -data = {match.strip('()"'): '' for match in matches} - -# Save as a JSON file -with open('labels.json', 'w', encoding='utf-8') as f: - json.dump(data, f, ensure_ascii=False, indent=4) \ No newline at end of file diff --git a/spaces/abdvl/datahub_qa_bot/docs/how/configuring-authorization-with-apache-ranger.md b/spaces/abdvl/datahub_qa_bot/docs/how/configuring-authorization-with-apache-ranger.md deleted file mode 100644 index 26d3be6d358b2e34a58fdd76895b9f8a7c612c0f..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/how/configuring-authorization-with-apache-ranger.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -title: "Configuring Authorization with Apache Ranger" -hide_title: true ---- -# Configuring Authorization with Apache Ranger -DataHub integration with Apache Ranger allows DataHub Authorization policies to be controlled inside Apache Ranger. -Admins can create users, groups and roles on Apache Ranger, and then assign them to Ranger policies to control the authorization of requests to DataHub. - -We'll break down configuration of the DataHub Apache Ranger Plugin into two parts: - -1. Configuring your Apache Ranger Deployment -2. Configuring your DataHub Deployment - -> Disclaimer: All configurations shown in this documented were tested against [Privacera Platform](https://privacera.com/) v6.3.0.1. - -# Prerequisites -- User identifier present in CorpRole URN should be the name of the AD/LDAP user. For example in URN **urn:li:corpuser:datahub**, the **datahub** should present as name of user in AD/LDAP -- Apache Ranger and DataHub are configured for authentication via same IDP (either LDAP + JaaS or OIDC SSO) -- Apache Ranger service available via HTTP -- Basic authentication is enabled on Apache Ranger Service - -# Configuration - -## Configuring your Apache Ranger Deployment - -Perform the following steps to configure an Apache Ranger deployment to support creating access policies compatible with DataHub. -For kubernetes example command, please replace the <ranger-pod-name> and <namespace> as per your environment. - -1. Download the **datahub-ranger-plugin** from [Maven](https://mvnrepository.com/artifact/io.acryl/datahub-ranger-plugin) -2. Create a "datahub" directory inside the "ranger-plugins" directory where Apache Ranger is deployed. For example, to do this in a Privacera container - - *Docker command:* - ```bash - docker exec privacera_ranger_1 mkdir ews/webapp/WEB-INF/classes/ranger-plugins/datahub - ``` - *Kubernetes command:* - ```bash - kubectl exec mkdir ews/webapp/WEB-INF/classes/ranger-plugins/datahub -n - ``` -3. 
Copy the downloaded **datahub-ranger-plugin** jar into the newly created "datahub" directory. For example, to do this in a Privacera container - - *Docker command:* - ```bash - docker cp datahub-ranger-plugin-<version>.jar privacera_ranger_1:/opt/ranger/ranger-2.1.0-admin/ews/webapp/WEB-INF/classes/ranger-plugins/datahub/ - ``` - *Kubernetes command:* - ```bash - kubectl cp datahub-ranger-plugin-<version>.jar <ranger-pod-name>:/opt/ranger/ranger-2.1.0-admin/ews/webapp/WEB-INF/classes/ranger-plugins/datahub/ -n <namespace> - ``` -4. Download the [service definition file](https://github.com/acryldata/datahub-ranger-auth-plugin/blob/main/datahub-ranger-plugin/conf/servicedef.json). This is the Ranger service definition JSON file for datahub-ranger-plugin-<version>.jar. -5. Register the downloaded service definition file with the Apache Ranger Service. To do this, execute the below curl command
    -Replace the variables in the curl command with their corresponding values - - <ranger-admin-username> - - <ranger-admin-password> - - <ranger-host> - ```bash - curl -u <ranger-admin-username>:<ranger-admin-password> -X POST -H "Accept: application/json" -H "Content-Type: application/json" --data @servicedef.json http://<ranger-host>:6080/service/public/v2/api/servicedef - ``` - -### Defining a Ranger Policy - -Now, you should have the DataHub plugin registered with Apache Ranger. Next, we'll create a sample user and add them to our first resource policy. - -1. Log into the Apache Ranger UI (Privacera Portal) to perform the steps below. -2. Verify **datahub-ranger-plugin** is registered successfully: The **datahub-ranger-plugin** should be visible as **DATAHUB** in *Access Management -> Resource Policies*. -3. Create a service under the plugin **DATAHUB** with the name **ranger_datahub** - - The **DATAHUB** plugin and **ranger_datahub** service are shown in the screenshot below:
    - - ![Privacera Portal DATAHUB screenshot](../imgs/apache-ranger/datahub-plugin.png) - -4. Create a new policy under the **ranger_datahub** service - this will be used to control DataHub authorization. -5. Create a test user & assign them to a policy. We'll use the `datahub` user, which is the default root user inside DataHub. - - To do this, perform the below steps - - Create a user **datahub** - - Create a policy under the **ranger_datahub** service. To assign [Platform Privileges](../authorization/policies.md#privileges) (e.g. Admin privileges), simply use the "platform" resource type which is defined. To test the flow, we can simply assign the **datahub** user all platform privileges that are available through the Ranger UI. This will enable the "datahub" user to have full platform admin privileges. - - > To define fine-grained resource privileges, e.g. for DataHub Datasets, Dashboards, Charts, and more, you can simply select the appropriate Resource Type in the Ranger policy builder. You should also see a list of privileges that are supported for each resource type, which correspond to the actions that you can perform. To learn more about supported privileges, check out the DataHub [Policies Guide](../authorization/policies.md#privileges). - - DataHub platform access policy screenshot:
    - - ![Privacera Portal DATAHUB screenshot](../imgs/apache-ranger/datahub-platform-access-policy.png) - -Once we've created our first policy, we can set up DataHub to start authorizing requests using Ranger policies. - - -## Configuring your DataHub Deployment - -Perform the following steps to configure DataHub to send incoming requests to Apache Ranger for authorization. - -1. Download Apache Ranger security xml [ranger-datahub-security.xml](https://github.com/acryldata/datahub-ranger-auth-plugin/blob/main/datahub-ranger-plugin/conf/ranger-datahub-security.xml) -2. In **ranger-datahub-security.xml** edit the value of property *ranger.plugin.datahub.policy.rest.url*. Sample snippet is shown below - ```xml - - ranger.plugin.datahub.policy.rest.url - http://199.209.9.70:6080 - - URL to Ranger Admin - - - ``` - -As per your deployment follow either Docker or Kubernetes section below -### Docker - -**Build Ranger Authorizer Plugin** -1. Clone DataHub Repo: Clone the DataHub repository - ```shell - cd ~/ - git clone https://github.com/acryldata/datahub-ranger-auth-plugin.git - ``` -2. Go inside the datahub directory: You should be inside the `datahub-ranger-auth-plugin` directory to execute build command - ```shell - cd ~/datahub-ranger-auth-plugin/ - ``` -3. Build plugin: Execute below gradle command to build Ranger Authorizer Plugin jar - ```shell - ./gradlew apache-ranger-plugin:shadowJar - ``` - This step will generate a jar file i.e. ./apache-ranger-plugin/build/libs/apache-ranger-plugin-<version>-SNAPSHOT.jar. - - Let's call this jar as ranger-plugin-jar. We need this jar in below step (Configure Ranger Authorizer Plugin) - - -**Configure Ranger Authorizer Plugin** - -On the host where `datahub-gms` is deployed, follow these steps: -1. Create directory `~/.datahub/plugins/auth/apache-ranger-authorizer/`: Executes below command - ```bash - mkdir -p ~/.datahub/plugins/auth/apache-ranger-authorizer/ - ``` -2. Copy `ranger-datahub-security.xml` file to `~/.datahub/plugins/auth/apache-ranger-authorizer/` -3. Copy ranger-plugin-jar: Copy the apache-ranger-plugin-<version>-SNAPSHOT.jar - ```bash - cp ./apache-ranger-plugin/build/libs/apache-ranger-plugin--SNAPSHOT.jar ~/.datahub/plugins/auth/apache-ranger-authorizer/apache-ranger-authorizer.jar - ``` -4. Create `config.yml`: Create config.yml if not exist - ```shell - touch ~/.datahub/plugins/auth/config.yml - ``` -5. Set Apache Ranger Plugin config: Add below entry in config.yml file. Set username and password to Apache Ranger user credentials - ```yaml - plugins: - - name: "apache-ranger-authorizer" - type: "authorizer" - enabled: "true" - params: - className: "com.datahub.authorization.ranger.RangerAuthorizer" - configs: - username: "" - password: "" - ``` -6. Restart DataHub GMS container (i.e. `datahub-gms`) - - -### Kubernetes - -Helm support is coming soon. - - -That's it! Now we can test out the integration. - -### Validating your Setup -To verify that things are working as expected, we can test that the root **datahub** user has all Platform Privileges and is able to perform all operations: managing users & groups, creating domains, and more. To do this, simply log into your DataHub deployment via the root DataHub user. - -# Revert the Configuration -If you want to revert your deployment configuration and don't want Apache Ranger to control the authorization of your DataHub deployment -then follow the below sections to undo the configuration steps you have performed in section *Configuring Authorization with Apache Ranger* - -1. 
Revert Configuration of your Apache Ranger Deployment -2. Revert Configuration of your DataHub Deployment - -## Revert Configuration of your Apache Ranger Deployment - For the Kubernetes example commands, please replace <ranger-pod-name> and <namespace> as per your environment. - - 1. Delete the **ranger_datahub** service: Log into the Privacera Portal and delete the **ranger_datahub** service - - The **ranger_datahub** service is shown in the screenshot below:
    - - ![Privacera Portal DATAHUB screenshot](../imgs/apache-ranger/datahub-plugin.png) - - 2. Delete the **datahub** plugin: Execute the below curl command to delete the **datahub** plugin - Replace the variables in the curl command with their corresponding values - - <ranger-admin-username> - - <ranger-admin-password> - - <ranger-host> - - ```bash - curl -u <ranger-admin-username>:<ranger-admin-password> -X DELETE -H "Accept: application/json" -H "Content-Type: application/json" http://<ranger-host>:6080/service/public/v2/api/servicedef/name/datahub - ``` - 3. Delete the **datahub** plugin directory: Execute the below command to delete the **datahub** plugin directory from Apache Ranger - - *Docker command:* - ```bash - docker exec privacera_ranger_1 rm -rf ews/webapp/WEB-INF/classes/ranger-plugins/datahub - ``` - *Kubernetes command:* - ```bash - kubectl exec <ranger-pod-name> -n <namespace> -- sh -c 'rm -rf ews/webapp/WEB-INF/classes/ranger-plugins/datahub' - ``` - - -## Revert Configuration of your DataHub Deployment -### Docker - 1. Remove the Apache Ranger Plugin entry: From the `config.yml` file, remove the entry which was added for the Apache Ranger Plugin - 2. Redeploy DataHub (`datahub-gms`) -### Kubernetes - For the Kubernetes example commands, please replace <namespace> as per your environment. -1. Open the deployment editor: Execute the below command - ```bash - kubectl edit deployment datahub-datahub-gms -n <namespace> - ``` -2. Remove the below environment variables - 1. AUTH_POLICIES_ENABLED - 2. RANGER_AUTHORIZER_ENABLED - 3. RANGER_USERNAME - 4. RANGER_PASSWORD -3. Remove the below volume-related settings - 1. volumes - 2. volumeMounts -4. Save and quit the editor, then use the below command to check the status of the **datahub-datahub-gms** deployment rollout - ```bash - kubectl rollout status deployment/datahub-datahub-gms -n <namespace> - ``` - On successful rollout you should see the message *deployment "datahub-datahub-gms" successfully rolled out* - - -### Validating your Setup -To verify that things are working as expected, we can test that the root **datahub** user has all Platform Privileges and is able to perform all operations: managing users & groups, creating domains, and more. To do this, simply log into your DataHub deployment via the root DataHub user. diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/datasets/dataset_wrappers.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/datasets/dataset_wrappers.py deleted file mode 100644 index d6a5e957ec3b44465432617cf6e8f0b86a8a5efa..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmseg/datasets/dataset_wrappers.py +++ /dev/null @@ -1,50 +0,0 @@ -from torch.utils.data.dataset import ConcatDataset as _ConcatDataset - -from .builder import DATASETS - - -@DATASETS.register_module() -class ConcatDataset(_ConcatDataset): - """A wrapper of concatenated dataset. - - Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but - concat the group flag for image aspect ratio. - - Args: - datasets (list[:obj:`Dataset`]): A list of datasets. - """ - - def __init__(self, datasets): - super(ConcatDataset, self).__init__(datasets) - self.CLASSES = datasets[0].CLASSES - self.PALETTE = datasets[0].PALETTE - - -@DATASETS.register_module() -class RepeatDataset(object): - """A wrapper of repeated dataset. - - The length of repeated dataset will be `times` larger than the original - dataset. This is useful when the data loading time is long but the dataset - is small. Using RepeatDataset can reduce the data loading time between - epochs. - - Args: - dataset (:obj:`Dataset`): The dataset to be repeated. - times (int): Repeat times. 
- """ - - def __init__(self, dataset, times): - self.dataset = dataset - self.times = times - self.CLASSES = dataset.CLASSES - self.PALETTE = dataset.PALETTE - self._ori_len = len(self.dataset) - - def __getitem__(self, idx): - """Get item from original dataset.""" - return self.dataset[idx % self._ori_len] - - def __len__(self): - """The length is multiplied by ``times``""" - return self.times * self._ori_len diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/apis/test.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/apis/test.py deleted file mode 100644 index e574eb7da04f09a59cf99ff953c36468ae87a326..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/apis/test.py +++ /dev/null @@ -1,238 +0,0 @@ -import os.path as osp -import pickle -import shutil -import tempfile - -import annotator.uniformer.mmcv as mmcv -import numpy as np -import torch -import torch.distributed as dist -from annotator.uniformer.mmcv.image import tensor2imgs -from annotator.uniformer.mmcv.runner import get_dist_info - - -def np2tmp(array, temp_file_name=None): - """Save ndarray to local numpy file. - - Args: - array (ndarray): Ndarray to save. - temp_file_name (str): Numpy file name. If 'temp_file_name=None', this - function will generate a file name with tempfile.NamedTemporaryFile - to save ndarray. Default: None. - - Returns: - str: The numpy file name. - """ - - if temp_file_name is None: - temp_file_name = tempfile.NamedTemporaryFile( - suffix='.npy', delete=False).name - np.save(temp_file_name, array) - return temp_file_name - - -def single_gpu_test(model, - data_loader, - show=False, - out_dir=None, - efficient_test=False, - opacity=0.5): - """Test with single GPU. - - Args: - model (nn.Module): Model to be tested. - data_loader (utils.data.Dataloader): Pytorch data loader. - show (bool): Whether show results during inference. Default: False. - out_dir (str, optional): If specified, the results will be dumped into - the directory to save output results. - efficient_test (bool): Whether save the results as local numpy files to - save CPU memory during evaluation. Default: False. - opacity(float): Opacity of painted segmentation map. - Default 0.5. - Must be in (0, 1] range. - Returns: - list: The prediction results. 
- """ - - model.eval() - results = [] - dataset = data_loader.dataset - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, **data) - - if show or out_dir: - img_tensor = data['img'][0] - img_metas = data['img_metas'][0].data[0] - imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) - assert len(imgs) == len(img_metas) - - for img, img_meta in zip(imgs, img_metas): - h, w, _ = img_meta['img_shape'] - img_show = img[:h, :w, :] - - ori_h, ori_w = img_meta['ori_shape'][:-1] - img_show = mmcv.imresize(img_show, (ori_w, ori_h)) - - if out_dir: - out_file = osp.join(out_dir, img_meta['ori_filename']) - else: - out_file = None - - model.module.show_result( - img_show, - result, - palette=dataset.PALETTE, - show=show, - out_file=out_file, - opacity=opacity) - - if isinstance(result, list): - if efficient_test: - result = [np2tmp(_) for _ in result] - results.extend(result) - else: - if efficient_test: - result = np2tmp(result) - results.append(result) - - batch_size = len(result) - for _ in range(batch_size): - prog_bar.update() - return results - - -def multi_gpu_test(model, - data_loader, - tmpdir=None, - gpu_collect=False, - efficient_test=False): - """Test model with multiple gpus. - - This method tests model with multiple gpus and collects the results - under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' - it encodes results to gpu tensors and use gpu communication for results - collection. On cpu mode it saves the results on different gpus to 'tmpdir' - and collects them by the rank 0 worker. - - Args: - model (nn.Module): Model to be tested. - data_loader (utils.data.Dataloader): Pytorch data loader. - tmpdir (str): Path of directory to save the temporary results from - different gpus under cpu mode. - gpu_collect (bool): Option to use either gpu or cpu to collect results. - efficient_test (bool): Whether save the results as local numpy files to - save CPU memory during evaluation. Default: False. - - Returns: - list: The prediction results. 
- """ - - model.eval() - results = [] - dataset = data_loader.dataset - rank, world_size = get_dist_info() - if rank == 0: - prog_bar = mmcv.ProgressBar(len(dataset)) - for i, data in enumerate(data_loader): - with torch.no_grad(): - result = model(return_loss=False, rescale=True, **data) - - if isinstance(result, list): - if efficient_test: - result = [np2tmp(_) for _ in result] - results.extend(result) - else: - if efficient_test: - result = np2tmp(result) - results.append(result) - - if rank == 0: - batch_size = data['img'][0].size(0) - for _ in range(batch_size * world_size): - prog_bar.update() - - # collect results from all ranks - if gpu_collect: - results = collect_results_gpu(results, len(dataset)) - else: - results = collect_results_cpu(results, len(dataset), tmpdir) - return results - - -def collect_results_cpu(result_part, size, tmpdir=None): - """Collect results with CPU.""" - rank, world_size = get_dist_info() - # create a tmp dir if it is not specified - if tmpdir is None: - MAX_LEN = 512 - # 32 is whitespace - dir_tensor = torch.full((MAX_LEN, ), - 32, - dtype=torch.uint8, - device='cuda') - if rank == 0: - tmpdir = tempfile.mkdtemp() - tmpdir = torch.tensor( - bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') - dir_tensor[:len(tmpdir)] = tmpdir - dist.broadcast(dir_tensor, 0) - tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() - else: - mmcv.mkdir_or_exist(tmpdir) - # dump the part result to the dir - mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank))) - dist.barrier() - # collect all parts - if rank != 0: - return None - else: - # load results of all parts from tmp dir - part_list = [] - for i in range(world_size): - part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i)) - part_list.append(mmcv.load(part_file)) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - # remove tmp dir - shutil.rmtree(tmpdir) - return ordered_results - - -def collect_results_gpu(result_part, size): - """Collect results with GPU.""" - rank, world_size = get_dist_info() - # dump result part to tensor with pickle - part_tensor = torch.tensor( - bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') - # gather all result part tensor shape - shape_tensor = torch.tensor(part_tensor.shape, device='cuda') - shape_list = [shape_tensor.clone() for _ in range(world_size)] - dist.all_gather(shape_list, shape_tensor) - # padding result part tensor to max length - shape_max = torch.tensor(shape_list).max() - part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') - part_send[:shape_tensor[0]] = part_tensor - part_recv_list = [ - part_tensor.new_zeros(shape_max) for _ in range(world_size) - ] - # gather all result part - dist.all_gather(part_recv_list, part_send) - - if rank == 0: - part_list = [] - for recv, shape in zip(part_recv_list, shape_list): - part_list.append( - pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) - # sort the results - ordered_results = [] - for res in zip(*part_list): - ordered_results.extend(list(res)) - # the dataloader may pad some samples - ordered_results = ordered_results[:size] - return ordered_results diff --git a/spaces/abidlabs/gpt-talking-portrait/style.css b/spaces/abidlabs/gpt-talking-portrait/style.css deleted file mode 100644 index 967321d3a240a43932b80be617e11e9d44c5d52b..0000000000000000000000000000000000000000 --- 
a/spaces/abidlabs/gpt-talking-portrait/style.css +++ /dev/null @@ -1,165 +0,0 @@ -div#col-container, div#col-container-2 { - max-width: 510px; - margin-left: auto; - margin-right: auto; -} - -div#col-container-2 { - margin-top: -16px; -} - -a { - text-decoration-line: underline; - font-weight: 600; - color: inherit; -} - -.dark a { - color: white; -} - -div#record_btn .mic-wrap > button { - width: 100%; - margin-top: 6px; - padding: 9px; - border-radius: 20px; -} - -div#error_handler { - margin-bottom: 16px; -} - -div#error_handler .openai_error { - color: #ff7900; - background: #fff1e4; - border: 1px solid #ffe4cb; -} - -.dark div#error_handler .openai_error { - color: #ff7900; - background: #1b1303; - border: 1px solid #2c1f13; -} - -div#error_handler .openai_clear { - color: #057857; - background: #ecfdf5; - border: 1px solid #d1fae5; -} - -.dark div#error_handler .openai_clear { - color: #15b881; - background: #111827; - border: 1px solid #151c2e; -} - -div#error_handler span { - font-family: 'IBM Plex Mono'; - font-size: 0.8em; - padding: 16px; - display: flex; - /*border: 1px solid #e5e7eb;*/ - border-radius: 10px; -} - -.dark div#error_handler span { - /*border: 1px solid #374151;*/ -} - -div#secondary-buttons{ - display: flex; - flex-direction: flex-end; -} - -button#clean-btn { - border-radius: 20px; - background: none; -} - -button#clean-btn:hover { - border-radius: 20px; - background: #f1f1f1; -} - -.dark button#clean-btn:hover { - background: #4b5563; -} - -.footer { - margin-bottom: 45px; - margin-top: 10px; - text-align: center; - border-bottom: 1px solid #e5e5e5; -} - -.footer>p { - font-size: .8rem; - display: inline-block; - padding: 0 10px; - transform: translateY(25px); - background: white; -} - -.dark .footer { - border-color: #303030; -} - -.dark .footer>p { - background: #0b0f19; -} - -.animate-spin { - animation: spin 1s linear infinite; -} - -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} -#share-btn-container { - display: flex; - padding-left: 0.5rem !important; - padding-right: 0.5rem !important; - background-color: #000000; - justify-content: center; - align-items: center; - border-radius: 9999px !important; - max-width: 13rem; -} - -#share-btn-container:hover { - background-color: #060606; -} - -#share-btn { - all: initial; - color: #ffffff; - font-weight: 600; - cursor:pointer; - font-family: 'IBM Plex Sans', sans-serif; - margin-left: 0.5rem !important; - padding-top: 0.5rem !important; - padding-bottom: 0.5rem !important; - right:0; -} - -#share-btn * { - all: unset; -} - -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} - -#share-btn-container .wrap { - display: none !important; -} - -#share-btn-container.hidden { - display: none!important; -} \ No newline at end of file diff --git a/spaces/akhaliq/BLIP/app.py b/spaces/akhaliq/BLIP/app.py deleted file mode 100644 index 060c94754729eaa13734bf3420327804f682df81..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/BLIP/app.py +++ /dev/null @@ -1,10 +0,0 @@ -import gradio as gr - - -description="Space for BLIP is now at https://huggingface.co/spaces/Salesforce/BLIP

    " - -def greet(name): - return "Hello " + name + "!!" - -iface = gr.Interface(fn=greet, inputs="text", outputs="text",description=description) -iface.launch() \ No newline at end of file diff --git a/spaces/akhaliq/GPEN/sr_model/rrdbnet_arch.py b/spaces/akhaliq/GPEN/sr_model/rrdbnet_arch.py deleted file mode 100644 index 5e1f04c5aee5bcdcd2ddae5471843ff057d863b4..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/GPEN/sr_model/rrdbnet_arch.py +++ /dev/null @@ -1,116 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F - -from arch_util import default_init_weights, make_layer, pixel_unshuffle - - -class ResidualDenseBlock(nn.Module): - """Residual Dense Block. - - Used in RRDB block in ESRGAN. - - Args: - num_feat (int): Channel number of intermediate features. - num_grow_ch (int): Channels for each growth. - """ - - def __init__(self, num_feat=64, num_grow_ch=32): - super(ResidualDenseBlock, self).__init__() - self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1) - self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1) - self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1) - self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1) - self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1) - - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - # initialization - default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1) - - def forward(self, x): - x1 = self.lrelu(self.conv1(x)) - x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1))) - x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1))) - x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1))) - x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1)) - # Emperically, we use 0.2 to scale the residual for better performance - return x5 * 0.2 + x - - -class RRDB(nn.Module): - """Residual in Residual Dense Block. - - Used in RRDB-Net in ESRGAN. - - Args: - num_feat (int): Channel number of intermediate features. - num_grow_ch (int): Channels for each growth. - """ - - def __init__(self, num_feat, num_grow_ch=32): - super(RRDB, self).__init__() - self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch) - self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch) - self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch) - - def forward(self, x): - out = self.rdb1(x) - out = self.rdb2(out) - out = self.rdb3(out) - # Emperically, we use 0.2 to scale the residual for better performance - return out * 0.2 + x - -class RRDBNet(nn.Module): - """Networks consisting of Residual in Residual Dense Block, which is used - in ESRGAN. - - ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks. - - We extend ESRGAN for scale x2 and scale x1. - Note: This is one option for scale 1, scale 2 in RRDBNet. - We first employ the pixel-unshuffle (an inverse operation of pixelshuffle to reduce the spatial size - and enlarge the channel size before feeding inputs into the main ESRGAN architecture. - - Args: - num_in_ch (int): Channel number of inputs. - num_out_ch (int): Channel number of outputs. - num_feat (int): Channel number of intermediate features. - Default: 64 - num_block (int): Block number in the trunk network. Defaults: 23 - num_grow_ch (int): Channels for each growth. Default: 32. 
- """ - - def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32): - super(RRDBNet, self).__init__() - self.scale = scale - if scale == 2: - num_in_ch = num_in_ch * 4 - elif scale == 1: - num_in_ch = num_in_ch * 16 - self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1) - self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch) - self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - # upsample - self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1) - self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - - self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True) - - def forward(self, x): - if self.scale == 2: - feat = pixel_unshuffle(x, scale=2) - elif self.scale == 1: - feat = pixel_unshuffle(x, scale=4) - else: - feat = x - feat = self.conv_first(feat) - body_feat = self.conv_body(self.body(feat)) - feat = feat + body_feat - # upsample - feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest'))) - feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest'))) - out = self.conv_last(self.lrelu(self.conv_hr(feat))) - return out diff --git a/spaces/akhaliq/T0pp/README.md b/spaces/akhaliq/T0pp/README.md deleted file mode 100644 index 462e144e4db8912cc7f05b608d78273d869a763f..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/T0pp/README.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -title: T0pp -emoji: 👀 -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/alamin655/websurfx/docs/introduction.md b/spaces/alamin655/websurfx/docs/introduction.md deleted file mode 100644 index 7b540fbb828f4bfae2b8c49461e4d1e703958603..0000000000000000000000000000000000000000 --- a/spaces/alamin655/websurfx/docs/introduction.md +++ /dev/null @@ -1,13 +0,0 @@ -# Introduction - -A modern-looking, lightning-fast, privacy-respecting, secure [meta search engine](https://en.wikipedia.org/wiki/Metasearch_engine) (pronounced as websurface or web-surface /wɛbˈsɜːrfəs/.) written in Rust. It provides a fast and secure search experience while respecting user privacy. - -# Motivation - -Most meta search engines tend to be slow, lack high level of customization and missing many features and all of them like security as they are written in unsafe languages like python, javascript, etc which tend to open a wide variety of vulnerabilities which can also sometimes pose a threat to privacy as sometimes this can be exploited and can be used to leveraged to leak out sensitive information which is never good. 
- -# Solution - -Websurfx is a project which seeks to provide privacy, security, speed and all the features which the user wants. - -[⬅️ Go back to Home](./README.md) diff --git a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/AttlistDecl.pod b/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/AttlistDecl.pod deleted file mode 100644 index 56f2c71112e2794096833e65126c30a2f7dd84b4..0000000000000000000000000000000000000000 --- a/spaces/aliabd/SummerTime/model/third_party/HMNet/ThirdParty/ROUGE/ROUGE-1.5.5/XML/DOM/AttlistDecl.pod +++ /dev/null @@ -1,45 +0,0 @@ -=head1 NAME - -XML::DOM::AttlistDecl - An XML ATTLIST declaration in XML::DOM - -=head1 DESCRIPTION - -XML::DOM::AttlistDecl extends L but is not part of the -DOM Level 1 specification. - -This node represents an ATTLIST declaration, e.g. - - - -Each attribute definition is stored a separate AttDef node. The AttDef nodes can -be retrieved with getAttDef and added with addAttDef. -(The AttDef nodes are stored in a NamedNodeMap internally.) - -=head2 METHODS - -=over 4 - -=item getName - -Returns the Element tagName. - -=item getAttDef (attrName) - -Returns the AttDef node for the attribute with the specified name. - -=item addAttDef (attrName, type, default, [ fixed ]) - -Adds a AttDef node for the attribute with the specified name. - -Parameters: - I the attribute name. - I the attribute type (e.g. "CDATA" or "(male|female)".) - I the default value enclosed in quotes (!), the string #IMPLIED or - the string #REQUIRED. - I whether the attribute is '#FIXED' (default is 0.) - -=back diff --git a/spaces/allknowingroger/Image-Models-Test208/README.md b/spaces/allknowingroger/Image-Models-Test208/README.md deleted file mode 100644 index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test208/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test ---- - - \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test52/README.md b/spaces/allknowingroger/Image-Models-Test52/README.md deleted file mode 100644 index dc049cb5168f22e428963ec52a63568f67f41ef4..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test52/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Image Models -emoji: 👀 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test51 ---- - - \ No newline at end of file diff --git a/spaces/amanatid/Adi_The_ArxivGPT_with_Voice/base.py b/spaces/amanatid/Adi_The_ArxivGPT_with_Voice/base.py deleted file mode 100644 index 9c9b36d80bbc771a4a7ce2b8748a2f753f01e163..0000000000000000000000000000000000000000 --- a/spaces/amanatid/Adi_The_ArxivGPT_with_Voice/base.py +++ /dev/null @@ -1,391 +0,0 @@ -"""Read Arxiv Papers.""" -import hashlib -import logging -import os -from typing import List, Optional, Tuple - -from llama_index import download_loader -from llama_index.readers.base import BaseReader -from llama_index.readers.schema.base import Document -###### -from llama_index import SimpleDirectoryReader -###### -from fpdf import FPDF - -class ArxivReader_mod(BaseReader): - """Arxiv Reader. 
- - Gets a search query, return a list of Documents of the top corresponding scientific papers on Arxiv. - """ - - def __init__( - self, - ): - """Initialize with parameters.""" - super().__init__() - - def _hacky_hash(self, some_string): - _hash = hashlib.md5(some_string.encode("utf-8")).hexdigest() - return _hash - - def load_data( - self, - search_query: str, - papers_dir: Optional[str] = r".papers", - max_results: Optional[int] = 50, - search_criterion: Optional[int] = 0, - ) -> List[Document]: - """Search for a topic on Arxiv, download the PDFs of the top results locally, then read them. - - Args: - search_query (str): A topic to search for (e.g. "Artificial Intelligence"). - papers_dir (Optional[str]): Locally directory to store the papers - max_results (Optional[int]): Maximum number of papers to fetch. - - Returns: - List[Document]: A list of Document objects. - """ - # find papers - import arxiv - - if search_criterion == 0: - sort_criterion = arxiv.SortCriterion.Relevance - - if search_criterion == 1: - sort_criterion = arxiv.SortCriterion.LastUpdatedDate - - if search_criterion == 2: - sort_criterion = arxiv.SortCriterion.SubmittedDate - - arxiv_search = arxiv.Search( - query=search_query, - id_list=[], - max_results=max_results, - sort_by= arxiv.SortCriterion.Relevance, - ) - - search_results = list(arxiv_search.results()) - logging.debug(f"> Successfully fetched {len(search_results)} papers") - - # Delete downloaded papers - try: - for f in os.listdir(papers_dir): - os.remove(os.path.join(papers_dir, f)) - logging.debug(f"> Deleted file: {f}") - os.rmdir(papers_dir) - logging.debug(f"> Deleted directory: {papers_dir}") - except OSError: - print("Unable to delete files or directory") - - - #create directory - if not os.path.exists(papers_dir): - os.makedirs(papers_dir) - - paper_lookup = {} - for paper in search_results: - # Hash filename to avoid bad charaters in file path - filename = f"{self._hacky_hash(paper.title)}.pdf" - paper_lookup[os.path.join(papers_dir, filename)] = { - "Title of this paper": paper.title, - "Authors": (", ").join([a.name for a in paper.authors]), - "Date published": paper.published.strftime("%m/%d/%Y"), - "URL": paper.entry_id, - # "summary": paper.summary - } - paper.download_pdf(dirpath=papers_dir, filename=filename) - logging.debug(f"> Downloading {filename}...") - - def get_paper_metadata(filename): - return paper_lookup[filename] - - #SimpleDirectoryReader = download_loader("SimpleDirectoryReader") - ############## - arxiv_documents = SimpleDirectoryReader(papers_dir, file_metadata=get_paper_metadata).load_data() - ############ - - # Include extra documents containing the abstracts - abstract_documents = [] - for paper in search_results: - d = f"The following is a summary of the paper: {paper.title}\n\nSummary: {paper.summary}" - abstract_documents.append(Document(text=d)) - - - - return arxiv_documents + abstract_documents - - def load_papers_and_abstracts( - self, - search_query: str, - papers_dir: Optional[str] = ".papers", - max_results: Optional[int] = 10, - ) -> Tuple[List[Document], List[Document]]: - """Search for a topic on Arxiv, download the PDFs of the top results locally, then read them. - - Args: - search_query (str): A topic to search for (e.g. "Artificial Intelligence"). - papers_dir (Optional[str]): Locally directory to store the papers - max_results (Optional[int]): Maximum number of papers to fetch. 
- - Returns: - List[Document]: A list of Document objects representing the papers themselves - List[Document]: A list of Document objects representing abstracts only - """ - import arxiv - - arxiv_search = arxiv.Search( - query=search_query, - id_list=[], - max_results=max_results, - sort_by=arxiv.SortCriterion.Relevance, - ) - search_results = list(arxiv_search.results()) - logging.debug(f"> Successfully fetched {len(search_results)} papers") - - if not os.path.exists(papers_dir): - os.makedirs(papers_dir) - - paper_lookup = {} - for paper in search_results: - # Hash filename to avoid bad charaters in file path - filename = f"{self._hacky_hash(paper.title)}.pdf" - paper_lookup[os.path.join(papers_dir, filename)] = { - "Title of this paper": paper.title, - "Authors": (", ").join([a.name for a in paper.authors]), - "Date published": paper.published.strftime("%m/%d/%Y"), - "URL": paper.entry_id, - # "summary": paper.summary - } - paper.download_pdf(dirpath=papers_dir, filename=filename) - logging.debug(f"> Downloading {filename}...") - - def get_paper_metadata(filename): - return paper_lookup[filename] - - SimpleDirectoryReader = download_loader("SimpleDirectoryReader") - arxiv_documents = SimpleDirectoryReader( - papers_dir, file_metadata=get_paper_metadata - ).load_data() - # Include extra documents containing the abstracts - abstract_documents = [] - for paper in search_results: - d = f"The following is a summary of the paper: {paper.title}\n\nSummary: {paper.summary}" - abstract_documents.append(Document(d)) - - # Delete downloaded papers - try: - for f in os.listdir(papers_dir): - os.remove(os.path.join(papers_dir, f)) - logging.debug(f"> Deleted file: {f}") - os.rmdir(papers_dir) - logging.debug(f"> Deleted directory: {papers_dir}") - except OSError: - print("Unable to delete files or directory") - - return arxiv_documents, abstract_documents - -class ArxivReader_mod_search(BaseReader): - """Arxiv Reader. - - Gets a search query, return a list of Documents of the top corresponding scientific papers on Arxiv. - """ - - def __init__( - self, - ): - """Initialize with parameters.""" - super().__init__() - - def _hacky_hash(self, some_string): - _hash = hashlib.md5(some_string.encode("utf-8")).hexdigest() - return _hash - - def load_data( - self, - search_query: str, - papers_dir: Optional[str] = ".papers", - max_results: Optional[int] = 50, - search_criterion: Optional[int] = 0, - ) -> List[Document]: - """Search for a topic on Arxiv, download the PDFs of the top results locally, then read them. - - Args: - search_query (str): A topic to search for (e.g. "Artificial Intelligence"). - papers_dir (Optional[str]): Locally directory to store the papers - max_results (Optional[int]): Maximum number of papers to fetch. - - Returns: - List[Document]: A list of Document objects. 
- """ - #find papers - import arxiv - if search_criterion == 0: - sort_criterion = arxiv.SortCriterion.Relevance - - if search_criterion == 1: - sort_criterion = arxiv.SortCriterion.LastUpdatedDate - - if search_criterion == 2: - sort_criterion = arxiv.SortCriterion.SubmittedDate - - arxiv_search = arxiv.Search( - query=search_query, - id_list=[], - max_results=max_results, - sort_by= sort_criterion, - ) - search_results = list(arxiv_search.results()) - logging.debug(f"> Successfully fetched {len(search_results)} papers") - - #create directory - if not os.path.exists(papers_dir): - os.makedirs(papers_dir) - else: - # Delete downloaded papers - try: - for f in os.listdir(papers_dir): - os.remove(os.path.join(papers_dir, f)) - logging.debug(f"> Deleted file: {f}") - os.rmdir(papers_dir) - logging.debug(f"> Deleted directory: {papers_dir}") - os.makedirs(papers_dir) - except OSError: - print("Unable to delete files or directory") - - paper_lookup = {} - for paper in search_results: - # Hash filename to avoid bad charaters in file path - filename = f"{self._hacky_hash(paper.title)}.pdf" - #filename = f"{paper.title}.pdf" - paper_lookup[os.path.join(papers_dir, filename)] = { - "Title of this paper": paper.title, - "Authors": (", ").join([a.name for a in paper.authors]), - "Date published": paper.published.strftime("%m/%d/%Y"), - "URL": paper.entry_id, - "summary": paper.summary, - } - - paper.download_pdf(dirpath=papers_dir, filename=filename) - logging.debug(f"> Downloading {filename}...") - - def get_paper_metadata(filename): - return paper_lookup[filename] - - SimpleDirectoryReader = download_loader("SimpleDirectoryReader") - arxiv_documents = SimpleDirectoryReader( - papers_dir, file_metadata=get_paper_metadata - ).load_data() - # Include extra documents containing the abstracts - - # save FPDF() class into - # a variable pdf - pdf = FPDF() - - # Add a page - pdf.add_page() - - # set style and size of font - # that you want in the pdf - pdf.set_font("Arial", size=15) - - # insert the texts in pdf - for paper in search_results: - authors = (", ").join([a.name for a in paper.authors]) - pub_paper = paper.published.strftime("%m/%d/%Y") - d = f"Title: {paper.title}\n\nAuthors:{authors}\n\nDate:{pub_paper}\n\nAbstract: {paper.summary}\n" - pdf.multi_cell(0, 10, txt= d, border = 0) - pdf.add_page() - - - # save the pdf with name .pdf - pdf.output(papers_dir+"/abstracts.pdf") - - - - - - abstract_documents = [] - for paper in search_results: - authors =(", ").join([a.name for a in paper.authors]) - pub_paper =paper.published.strftime("%m/%d/%Y") - d = f"The following is a summary of the paper: {paper.title}\n\nAuthors:{authors}\n\nDate:{pub_paper}\n\nSummary: {paper.summary}" -# print(d) - abstract_documents.append(Document(d)) - - - return arxiv_documents + abstract_documents - - def load_papers_and_abstracts( - self, - search_query: str, - papers_dir: Optional[str] = ".papers", - max_results: Optional[int] = 10, - ) -> Tuple[List[Document], List[Document]]: - """Search for a topic on Arxiv, download the PDFs of the top results locally, then read them. - - Args: - search_query (str): A topic to search for (e.g. "Artificial Intelligence"). - papers_dir (Optional[str]): Locally directory to store the papers - max_results (Optional[int]): Maximum number of papers to fetch. 
- - Returns: - List[Document]: A list of Document objects representing the papers themselves - List[Document]: A list of Document objects representing abstracts only - """ - import arxiv - - arxiv_search = arxiv.Search( - query=search_query, - id_list=[], - max_results=max_results, - sort_by=arxiv.SortCriterion.Relevance, - ) - search_results = list(arxiv_search.results()) - logging.debug(f"> Successfully fetched {len(search_results)} papers") - - if not os.path.exists(papers_dir): - os.makedirs(papers_dir) - - paper_lookup = {} - for paper in search_results: - # Hash filename to avoid bad charaters in file path - filename = f"{self._hacky_hash(paper.title)}.pdf" - paper_lookup[os.path.join(papers_dir, filename)] = { - "Title of this paper": paper.title, - "Authors": (", ").join([a.name for a in paper.authors]), - "Date published": paper.published.strftime("%m/%d/%Y"), - "URL": paper.entry_id, - # "summary": paper.summary - } - paper.download_pdf(dirpath=papers_dir, filename=filename) - logging.debug(f"> Downloading {filename}...") - - def get_paper_metadata(filename): - return paper_lookup[filename] - - SimpleDirectoryReader = download_loader("SimpleDirectoryReader") - arxiv_documents = SimpleDirectoryReader( - papers_dir, file_metadata=get_paper_metadata - ).load_data() - # Include extra documents containing the abstracts - abstract_documents = [] - for paper in search_results: - d = f"The following is a summary of the paper: {paper.title}\n\nSummary: {paper.summary}" - abstract_documents.append(Document(d)) - - # Delete downloaded papers - try: - for f in os.listdir(papers_dir): - os.remove(os.path.join(papers_dir, f)) - logging.debug(f"> Deleted file: {f}") - os.rmdir(papers_dir) - logging.debug(f"> Deleted directory: {papers_dir}") - except OSError: - print("Unable to delete files or directory") - - return arxiv_documents, abstract_documents - - -#test = ArxivReader_mod_search() -#test.load_data(search_query='quantum gravity', -# max_results=3, search_criterion =1) \ No newline at end of file diff --git a/spaces/antonovmaxim/text-generation-webui-space/api-example-stream.py b/spaces/antonovmaxim/text-generation-webui-space/api-example-stream.py deleted file mode 100644 index ad8f7bf8105bf0bfa3a8a39e0af8e88b0d4b57d1..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/api-example-stream.py +++ /dev/null @@ -1,67 +0,0 @@ -import asyncio -import json -import sys - -try: - import websockets -except ImportError: - print("Websockets package not found. Make sure it's installed.") - -# For local streaming, the websockets are hosted without ssl - ws:// -HOST = 'localhost:5005' -URI = f'ws://{HOST}/api/v1/stream' - -# For reverse-proxied streaming, the remote will likely host with ssl - wss:// -# URI = 'wss://your-uri-here.trycloudflare.com/api/v1/stream' - - -async def run(context): - # Note: the selected defaults change from time to time. 
- request = { - 'prompt': context, - 'max_new_tokens': 250, - 'do_sample': True, - 'temperature': 1.3, - 'top_p': 0.1, - 'typical_p': 1, - 'repetition_penalty': 1.18, - 'top_k': 40, - 'min_length': 0, - 'no_repeat_ngram_size': 0, - 'num_beams': 1, - 'penalty_alpha': 0, - 'length_penalty': 1, - 'early_stopping': False, - 'seed': -1, - 'add_bos_token': True, - 'truncation_length': 2048, - 'ban_eos_token': False, - 'skip_special_tokens': True, - 'stopping_strings': [] - } - - async with websockets.connect(URI, ping_interval=None) as websocket: - await websocket.send(json.dumps(request)) - - yield context # Remove this if you just want to see the reply - - while True: - incoming_data = await websocket.recv() - incoming_data = json.loads(incoming_data) - - match incoming_data['event']: - case 'text_stream': - yield incoming_data['text'] - case 'stream_end': - return - - -async def print_response_stream(prompt): - async for response in run(prompt): - print(response, end='') - sys.stdout.flush() # If we don't flush, we won't see tokens in realtime. - - -if __name__ == '__main__': - prompt = "In order to make homemade bread, follow these steps:\n1)" - asyncio.run(print_response_stream(prompt)) diff --git a/spaces/antonovmaxim/text-generation-webui-space/modules/evaluate.py b/spaces/antonovmaxim/text-generation-webui-space/modules/evaluate.py deleted file mode 100644 index adafa7137f8a676fee0595aa987dc37179561340..0000000000000000000000000000000000000000 --- a/spaces/antonovmaxim/text-generation-webui-space/modules/evaluate.py +++ /dev/null @@ -1,144 +0,0 @@ -import datetime -import traceback -from pathlib import Path - -import pandas as pd -import torch -from datasets import load_dataset -from tqdm import tqdm - -from modules import shared -from modules.models import load_model, unload_model -from modules.text_generation import encode -from server import get_model_specific_settings, update_model_parameters - - -def load_past_evaluations(): - if Path('logs/evaluations.csv').exists(): - df = pd.read_csv(Path('logs/evaluations.csv'), dtype=str) - df['Perplexity'] = pd.to_numeric(df['Perplexity']) - return df - else: - return pd.DataFrame(columns=['Model', 'LoRAs', 'Dataset', 'Perplexity', 'stride', 'max_length', 'Date', 'Comment']) - - -past_evaluations = load_past_evaluations() - - -def save_past_evaluations(df): - global past_evaluations - past_evaluations = df - df.to_csv(Path('logs/evaluations.csv'), index=False) - - -def calculate_perplexity(models, input_dataset, stride, _max_length): - ''' - Based on: - https://huggingface.co/docs/transformers/perplexity#calculating-ppl-with-fixedlength-models - ''' - - global past_evaluations - cumulative_log = '' - cumulative_log += "Loading the input dataset...\n" - yield cumulative_log - - # Copied from https://github.com/qwopqwop200/GPTQ-for-LLaMa/blob/triton/utils/datautils.py - if input_dataset == 'wikitext': - data = load_dataset('wikitext', 'wikitext-2-raw-v1', split='test') - text = "\n\n".join(data['text']) - elif input_dataset == 'ptb': - data = load_dataset('ptb_text_only', 'penn_treebank', split='validation') - text = "\n\n".join(data['sentence']) - elif input_dataset == 'ptb_new': - data = load_dataset('ptb_text_only', 'penn_treebank', split='test') - text = " ".join(data['sentence']) - else: - with open(Path(f'training/datasets/{input_dataset}.txt'), 'r', encoding='utf-8') as f: - text = f.read() - - for model in models: - if is_in_past_evaluations(model, input_dataset, stride, _max_length): - cumulative_log += f"{model} has already been 
tested. Ignoring.\n" - yield cumulative_log - continue - - if model != 'current model': - try: - yield cumulative_log + f"Loading {model}...\n" - model_settings = get_model_specific_settings(model) - shared.settings.update(model_settings) # hijacking the interface defaults - update_model_parameters(model_settings) # hijacking the command-line arguments - shared.model_name = model - unload_model() - shared.model, shared.tokenizer = load_model(shared.model_name) - except: - cumulative_log += f"Failed to load {model}. Moving on.\n" - yield cumulative_log - continue - - cumulative_log += f"Processing {model}...\n" - yield cumulative_log + "Tokenizing the input dataset...\n" - encodings = encode(text, add_special_tokens=False) - seq_len = encodings.shape[1] - max_length = _max_length or shared.model.config.max_position_embeddings - nlls = [] - prev_end_loc = 0 - for begin_loc in tqdm(range(0, seq_len, stride)): - yield cumulative_log + f"Evaluating... {100*begin_loc/seq_len:.2f}%" - end_loc = min(begin_loc + max_length, seq_len) - trg_len = end_loc - prev_end_loc # may be different from stride on last loop - input_ids = encodings[:, begin_loc:end_loc] - target_ids = input_ids.clone() - target_ids[:, :-trg_len] = -100 - - with torch.no_grad(): - outputs = shared.model(input_ids, labels=target_ids) - - # loss is calculated using CrossEntropyLoss which averages over valid labels - # N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels - # to the left by 1. - neg_log_likelihood = outputs.loss - - nlls.append(neg_log_likelihood) - - prev_end_loc = end_loc - if end_loc == seq_len: - break - - ppl = torch.exp(torch.stack(nlls).mean()) - add_entry_to_past_evaluations(float(ppl), shared.model_name, input_dataset, stride, _max_length) - save_past_evaluations(past_evaluations) - cumulative_log += f"Done. 
The perplexity is: {float(ppl)}\n\n" - yield cumulative_log - - -def add_entry_to_past_evaluations(perplexity, model, dataset, stride, max_length): - global past_evaluations - entry = { - 'Model': model, - 'LoRAs': ', '.join(shared.lora_names) or '-', - 'Dataset': dataset, - 'Perplexity': perplexity, - 'stride': str(stride), - 'max_length': str(max_length), - 'Date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), - 'Comment': '' - } - past_evaluations = pd.concat([past_evaluations, pd.DataFrame([entry])], ignore_index=True) - - -def is_in_past_evaluations(model, dataset, stride, max_length): - entries = past_evaluations[(past_evaluations['Model'] == model) & - (past_evaluations['Dataset'] == dataset) & - (past_evaluations['max_length'] == str(max_length)) & - (past_evaluations['stride'] == str(stride))] - - if entries.shape[0] > 0: - return True - else: - return False - - -def generate_markdown_table(): - sorted_df = past_evaluations.sort_values(by=['Dataset', 'stride', 'Perplexity', 'Date']) - return sorted_df diff --git a/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py b/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py deleted file mode 100644 index 76e4b272b479a26c63d120c818c140870cd8c287..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .backbone import build_backbone diff --git a/spaces/arjundutta10/Arjun_AI/README.md b/spaces/arjundutta10/Arjun_AI/README.md deleted file mode 100644 index d4b9a1d07f20307442edc17ad7cfa57d45490bc2..0000000000000000000000000000000000000000 --- a/spaces/arjundutta10/Arjun_AI/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Arjun AI -emoji: 🏃 -colorFrom: yellow -colorTo: green -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/artificialguybr/video-dubbing/TTS/docs/source/models/overflow.md b/spaces/artificialguybr/video-dubbing/TTS/docs/source/models/overflow.md deleted file mode 100644 index 09e270eae566d0c05c8c285af6504711d8f12cba..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/docs/source/models/overflow.md +++ /dev/null @@ -1,36 +0,0 @@ -# Overflow TTS - -Neural HMMs are a type of neural transducer recently proposed for -sequence-to-sequence modelling in text-to-speech. They combine the best features -of classic statistical speech synthesis and modern neural TTS, requiring less -data and fewer training updates, and are less prone to gibberish output caused -by neural attention failures. In this paper, we combine neural HMM TTS with -normalising flows for describing the highly non-Gaussian distribution of speech -acoustics. The result is a powerful, fully probabilistic model of durations and -acoustics that can be trained using exact maximum likelihood. Compared to -dominant flow-based acoustic models, our approach integrates autoregression for -improved modelling of long-range dependences such as utterance-level prosody. -Experiments show that a system based on our proposal gives more accurate -pronunciations and better subjective speech quality than comparable methods, -whilst retaining the original advantages of neural HMMs. 
Audio examples and code -are available at https://shivammehta25.github.io/OverFlow/. - - -## Important resources & papers -- HMM: https://de.wikipedia.org/wiki/Hidden_Markov_Model -- OverflowTTS paper: https://arxiv.org/abs/2211.06892 -- Neural HMM: https://arxiv.org/abs/2108.13320 -- Audio Samples: https://shivammehta25.github.io/OverFlow/ - - -## OverflowConfig -```{eval-rst} -.. autoclass:: TTS.tts.configs.overflow_config.OverflowConfig - :members: -``` - -## Overflow Model -```{eval-rst} -.. autoclass:: TTS.tts.models.overflow.Overflow - :members: -``` \ No newline at end of file diff --git a/spaces/artificialguybr/video-dubbing/TTS/recipes/thorsten_DE/download_thorsten_DE.sh b/spaces/artificialguybr/video-dubbing/TTS/recipes/thorsten_DE/download_thorsten_DE.sh deleted file mode 100644 index 27809ce50741e4491338f1cf04cbff52df1e26d9..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/recipes/thorsten_DE/download_thorsten_DE.sh +++ /dev/null @@ -1,21 +0,0 @@ -# create venv -python3 -m venv env -source .env/bin/activate -pip install pip --upgrade - -# download Thorsten_DE dataset -pip install gdown -gdown --id 1yKJM1LAOQpRVojKunD9r8WN_p5KzBxjc -O dataset.tgz -tar -xzf dataset.tgz - -# create train-val splits -shuf LJSpeech-1.1/metadata.csv > LJSpeech-1.1/metadata_shuf.csv -head -n 20668 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_train.csv -tail -n 2000 LJSpeech-1.1/metadata_shuf.csv > LJSpeech-1.1/metadata_val.csv - -# rename dataset and remove archive -mv LJSpeech-1.1 thorsten-de -rm dataset.tgz - -# destry venv -rm -rf env diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_losses.py b/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_losses.py deleted file mode 100644 index 522b7bb17ca59ee62d2b1de7245a3eab91339417..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/tests/tts_tests/test_losses.py +++ /dev/null @@ -1,239 +0,0 @@ -import unittest - -import torch as T - -from TTS.tts.layers.losses import BCELossMasked, L1LossMasked, MSELossMasked, SSIMLoss -from TTS.tts.utils.helpers import sequence_mask - - -class L1LossMaskedTests(unittest.TestCase): - def test_in_out(self): # pylint: disable=no-self-use - # test input == target - layer = L1LossMasked(seq_len_norm=False) - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.ones(4, 8, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 0.0 - - # test input != target - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.zeros(4, 8, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 1.0, "1.0 vs {}".format(output.item()) - - # test if padded values of input makes any difference - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.zeros(4, 8, 128).float() - dummy_length = (T.arange(5, 9)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 1.0, "1.0 vs {}".format(output.item()) - - dummy_input = T.rand(4, 8, 128).float() - dummy_target = dummy_input.detach() - dummy_length = (T.arange(5, 9)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 0, "0 vs {}".format(output.item()) - - # 
seq_len_norm = True - # test input == target - layer = L1LossMasked(seq_len_norm=True) - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.ones(4, 8, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 0.0 - - # test input != target - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.zeros(4, 8, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 1.0, "1.0 vs {}".format(output.item()) - - # test if padded values of input makes any difference - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.zeros(4, 8, 128).float() - dummy_length = (T.arange(5, 9)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item()) - - dummy_input = T.rand(4, 8, 128).float() - dummy_target = dummy_input.detach() - dummy_length = (T.arange(5, 9)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 0, "0 vs {}".format(output.item()) - - -class MSELossMaskedTests(unittest.TestCase): - def test_in_out(self): # pylint: disable=no-self-use - # test input == target - layer = MSELossMasked(seq_len_norm=False) - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.ones(4, 8, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 0.0 - - # test input != target - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.zeros(4, 8, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 1.0, "1.0 vs {}".format(output.item()) - - # test if padded values of input makes any difference - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.zeros(4, 8, 128).float() - dummy_length = (T.arange(5, 9)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 1.0, "1.0 vs {}".format(output.item()) - - dummy_input = T.rand(4, 8, 128).float() - dummy_target = dummy_input.detach() - dummy_length = (T.arange(5, 9)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 0, "0 vs {}".format(output.item()) - - # seq_len_norm = True - # test input == target - layer = MSELossMasked(seq_len_norm=True) - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.ones(4, 8, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 0.0 - - # test input != target - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.zeros(4, 8, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 1.0, "1.0 vs {}".format(output.item()) - - # test if padded values of input makes any difference - dummy_input = T.ones(4, 8, 128).float() - dummy_target = T.zeros(4, 8, 128).float() - dummy_length = (T.arange(5, 9)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input 
+ mask, dummy_target, dummy_length) - assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item()) - - dummy_input = T.rand(4, 8, 128).float() - dummy_target = dummy_input.detach() - dummy_length = (T.arange(5, 9)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 0, "0 vs {}".format(output.item()) - - -class SSIMLossTests(unittest.TestCase): - def test_in_out(self): # pylint: disable=no-self-use - # test input == target - layer = SSIMLoss() - dummy_input = T.ones(4, 57, 128).float() - dummy_target = T.ones(4, 57, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 0.0 - - # test input != target - dummy_input = T.arange(0, 4 * 57 * 128) - dummy_input = dummy_input.reshape(4, 57, 128).float() - dummy_target = T.arange(-4 * 57 * 128, 0) - dummy_target = dummy_target.reshape(4, 57, 128).float() - dummy_target = -dummy_target - - dummy_length = (T.ones(4) * 58).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() >= 1.0, "0 vs {}".format(output.item()) - - # test if padded values of input makes any difference - dummy_input = T.ones(4, 57, 128).float() - dummy_target = T.zeros(4, 57, 128).float() - dummy_length = (T.arange(54, 58)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 0.0 - - dummy_input = T.rand(4, 57, 128).float() - dummy_target = dummy_input.detach() - dummy_length = (T.arange(54, 58)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 0, "0 vs {}".format(output.item()) - - # seq_len_norm = True - # test input == target - layer = L1LossMasked(seq_len_norm=True) - dummy_input = T.ones(4, 57, 128).float() - dummy_target = T.ones(4, 57, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 0.0 - - # test input != target - dummy_input = T.ones(4, 57, 128).float() - dummy_target = T.zeros(4, 57, 128).float() - dummy_length = (T.ones(4) * 8).long() - output = layer(dummy_input, dummy_target, dummy_length) - assert output.item() == 1.0, "1.0 vs {}".format(output.item()) - - # test if padded values of input makes any difference - dummy_input = T.ones(4, 57, 128).float() - dummy_target = T.zeros(4, 57, 128).float() - dummy_length = (T.arange(54, 58)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert abs(output.item() - 1.0) < 1e-5, "1.0 vs {}".format(output.item()) - - dummy_input = T.rand(4, 57, 128).float() - dummy_target = dummy_input.detach() - dummy_length = (T.arange(54, 58)).long() - mask = ((sequence_mask(dummy_length).float() - 1.0) * 100.0).unsqueeze(2) - output = layer(dummy_input + mask, dummy_target, dummy_length) - assert output.item() == 0, "0 vs {}".format(output.item()) - - -class BCELossTest(unittest.TestCase): - def test_in_out(self): # pylint: disable=no-self-use - layer = BCELossMasked(pos_weight=5.0) - - length = T.tensor([95]) - target = ( - 1.0 - sequence_mask(length - 1, 100).float() - ) # [0, 0, .... 
1, 1] where the first 1 is the last mel frame - true_x = target * 200 - 100 # creates logits of [-100, -100, ... 100, 100] corresponding to target - zero_x = T.zeros(target.shape) - 100.0 # simulate logits if it never stops decoding - early_x = -200.0 * sequence_mask(length - 3, 100).float() + 100.0 # simulate logits on early stopping - late_x = -200.0 * sequence_mask(length + 1, 100).float() + 100.0 # simulate logits on late stopping - - loss = layer(true_x, target, length) - self.assertEqual(loss.item(), 0.0) - - loss = layer(early_x, target, length) - self.assertAlmostEqual(loss.item(), 2.1053, places=4) - - loss = layer(late_x, target, length) - self.assertAlmostEqual(loss.item(), 5.2632, places=4) - - loss = layer(zero_x, target, length) - self.assertAlmostEqual(loss.item(), 5.2632, places=4) - - # pos_weight should be < 1 to penalize early stopping - layer = BCELossMasked(pos_weight=0.2) - loss = layer(true_x, target, length) - self.assertEqual(loss.item(), 0.0) - - # when pos_weight < 1 overweight the early stopping loss - - loss_early = layer(early_x, target, length) - loss_late = layer(late_x, target, length) - self.assertGreater(loss_early.item(), loss_late.item()) diff --git a/spaces/asciicorp/hotel-chat/memory.py b/spaces/asciicorp/hotel-chat/memory.py deleted file mode 100644 index dd9f292d7566e0b2e4066001d6084cb34f546df0..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/hotel-chat/memory.py +++ /dev/null @@ -1,3 +0,0 @@ -from langchain.memory import ConversationBufferMemory - -memory = ConversationBufferMemory(memory_key="chat_history") \ No newline at end of file diff --git a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Raj Singh.html b/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Raj Singh.html deleted file mode 100644 index 76923722296367e29690fcb50d1c701abc9e9e95..0000000000000000000000000000000000000000 --- a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Raj Singh.html +++ /dev/null @@ -1,134 +0,0 @@ - - - - Raj Singh - - - - -
Raj Singh

I'm generally interested in being a mentor because I want to help aspiring ML engineers who were in the same position I was in. Finding a job in machine learning can be really daunting and stressful, and I would love to help make that process easier for mentees. I've found that my experiences with giving back to the community have been extremely rewarding and I want to continue doing it!

Interview

How did you hear about SM?
• Mentee under Andy Seow (2019)

Career
• graduated in 2019
• landed a job at Cultivate in 2020
• building and developing NLP models
• integrating into the platform (MLE)
• cross-functional collab
• the company got acquired by Perseptics
• hunting for the next job

Mentorship experience?
• took a lot of ownership / leadership at Cultivate (small company)
• has an intern working under him now

What are beginners lacking?
• It's a marathon, not a sprint
• new grads want immediate gratification (but it takes a while)
• don't want to destroy your mental health
• lots of ambiguity and a constantly evolving space
• no one rigid role that works
• Less success with cold emails now than when I was a mentee (probably because more people are doing it)

And how can you add value as a mentor?
• depends on the mentee
• so many different paths

Ideal mentee
• MLEs are ideal
• more on the younger side (new college grad)
• did a good job networking - invented my own strategies
• in my mentorship - Andy did not know a lot about networking, but did know a lot about ML

Questions about SM?
• what has changed about advertising yourself as a mentor?
• Is it still project based?
• Just ISAs?
    - - - \ No newline at end of file diff --git a/spaces/atticus/image-text-retrival-huster/misc/model.py b/spaces/atticus/image-text-retrival-huster/misc/model.py deleted file mode 100644 index 9a6cc59390c47c7223d434666c53a42713b97f83..0000000000000000000000000000000000000000 --- a/spaces/atticus/image-text-retrival-huster/misc/model.py +++ /dev/null @@ -1,128 +0,0 @@ -""" -****************** COPYRIGHT AND CONFIDENTIALITY INFORMATION ****************** -Copyright (c) 2018 [Thomson Licensing] -All Rights Reserved -This program contains proprietary information which is a trade secret/business \ -secret of [Thomson Licensing] and is protected, even if unpublished, under \ -applicable Copyright laws (including French droit d'auteur) and/or may be \ -subject to one or more patent(s). -Recipient is to retain this program in confidence and is not permitted to use \ -or make copies thereof other than as permitted in a written agreement with \ -[Thomson Licensing] unless otherwise expressly allowed by applicable laws or \ -by [Thomson Licensing] under express agreement. -Thomson Licensing is a company of the group TECHNICOLOR -******************************************************************************* -This scripts permits one to reproduce training and experiments of: - Engilberge, M., Chevallier, L., Pérez, P., & Cord, M. (2018, April). - Finding beans in burgers: Deep semantic-visual embedding with localization. - In Proceedings of CVPR (pp. 3984-3993) - -Author: Martin Engilberge -""" - -import torch -import torch.nn as nn - -from misc.config import path -from misc.weldonModel import ResNet_weldon -from sru import SRU - - -class SruEmb(nn.Module): - def __init__(self, nb_layer, dim_in, dim_out, dropout=0.25): - super(SruEmb, self).__init__() - - self.dim_out = dim_out - # SRU 作为文本特征提取 - self.rnn = SRU(dim_in, dim_out, num_layers=nb_layer, - dropout=dropout, rnn_dropout=dropout, - use_tanh=True, has_skip_term=True, - v1=True, rescale=False) - - def _select_last(self, x, lengths): - batch_size = x.size(0) - mask = x.data.new().resize_as_(x.data).fill_(0) - for i in range(batch_size): - mask[i][lengths[i] - 1].fill_(1) - x = x.mul(mask) - x = x.sum(1, keepdim=True).view(batch_size, self.dim_out) - return x - - def _process_lengths(self, input): - max_length = input.size(1) - # 获取每段文本的长度 - lengths = list( - max_length - input.data.eq(0).sum(1, keepdim=True).squeeze()) - return lengths - - def forward(self, input, lengths=None): - if lengths is None: - lengths = self._process_lengths(input) - x = input.permute(1, 0, 2) - # rnn - x, hn = self.rnn(x) - x = x.permute(1, 0, 2) - if lengths: - # 用mask抹除padding部分的权重 - x = self._select_last(x, lengths) - return x - - -class img_embedding(nn.Module): - - def __init__(self, args): - super(img_embedding, self).__init__() - # 图像backbone Resnet152 - model_weldon2 = ResNet_weldon(args, pretrained=False, weldon_pretrained_path=path["WELDON_CLASSIF_PRETRAINED"]) - - self.base_layer = nn.Sequential(*list(model_weldon2.children())[:-1]) - - # 关掉图像侧梯度 - for param in self.base_layer.parameters(): - param.requires_grad = False - - def forward(self, x): - x = self.base_layer(x) - x = x.view(x.size()[0], -1) - - return x - - # 图像激活图 - def get_activation_map(self, x): - x = self.base_layer[0](x) - act_map = self.base_layer[1](x) - act = self.base_layer[2](act_map) - return act, act_map - - -class joint_embedding(nn.Module): - - def __init__(self, args): - super(joint_embedding, self).__init__() - # 图像编码 - self.img_emb = torch.nn.DataParallel(img_embedding(args)) - # 
描述编码 - self.cap_emb = SruEmb(args.sru, 620, args.dimemb) - # 全连接 - self.fc = torch.nn.DataParallel(nn.Linear(2400, args.dimemb, bias=True)) - # dropout层 - self.dropout = torch.nn.Dropout(p=0.5) - - def forward(self, imgs, caps, lengths): - # 图像侧 - if imgs is not None: - x_imgs = self.img_emb(imgs) - x_imgs = self.dropout(x_imgs) - x_imgs = self.fc(x_imgs) - x_imgs = x_imgs / torch.norm(x_imgs, 2, dim=1, keepdim=True).expand_as(x_imgs) - else: - x_imgs = None - - # 描述侧 - if caps is not None: - x_caps = self.cap_emb(caps, lengths=lengths) - x_caps = x_caps / torch.norm(x_caps, 2, dim=1, keepdim=True).expand_as(x_caps) - else: - x_caps = None - - return x_imgs, x_caps diff --git a/spaces/awacke1/MadLibs/app.py b/spaces/awacke1/MadLibs/app.py deleted file mode 100644 index 246aef9595f306ccf40b907b6ec345620c15429e..0000000000000000000000000000000000000000 --- a/spaces/awacke1/MadLibs/app.py +++ /dev/null @@ -1,97 +0,0 @@ - -import json -import random -import re -import dataclasses - -import streamlit as st - -from gamestate import persistent_game_state - -st.markdown("""MAD LIBS -(dataset is from [a Microsoft EMNLP paper](https://www.microsoft.com/en-us/download/details.aspx?id=55593)) -Fill in all the fields then click "Generate Story". -""") - - -with open('stories.json') as f: - stories = json.load(f) - - -@dataclasses.dataclass -class GameState: - story: str - game_number: int = 0 - -state = persistent_game_state(initial_state=GameState(random.choice(stories))) - - -if st.button("new story"): - state.story = random.choice(stories) - state.game_number += 1 - -pos = { - 'cc': 'Coordinating conjunction', - 'cd': 'Cardinal number', - 'dt': 'Determiner', - 'ex': 'Existential there', - 'fw': 'Foreign word', - 'in': 'Preposition or subordinating conjunction', - 'jj': 'Adjective', - 'jjr': 'Adjective, comparative', - 'jjs': 'Adjective, superlative', - 'ls': 'List item marker', - 'md': 'Modal', - 'nn': 'Noun, singular or mass', - 'nns': 'Noun, plural', - 'nnp': 'Proper noun, singular', - 'nnps': 'Proper noun, plural', - 'pdt': 'Predeterminer', - 'pos': 'Possessive ending', - 'prp': 'Personal pronoun', - 'prp$': 'Possessive pronoun', - 'rb': 'Adverb', - 'rbr': 'Adverb, comparative', - 'rbs': 'Adverb, superlative', - 'rp': 'Particle', - 'sym': 'Symbol', - 'to': 'to', - 'uh': 'Interjection', - 'vb': 'Verb, base form', - 'vbd': 'Verb, past tense', - 'vbg': 'Verb, gerund or present participle', - 'vbn': 'Verb, past participle', - 'vbp': 'Verb, non-3rd person singular present', - 'vbz': 'Verb, 3rd person singular present', - 'wdt': 'Wh-determiner', - 'wp': 'Wh-pronoun', - 'wp$': 'Possessive wh-pronoun', - 'wrb': 'Wh-adverb', - # others - 'animal': 'Animal', - 'body': 'Body part', - 'body_plural': 'Body part, plural', - 'food': 'Food', - 'liquid': 'Liquid', - } - - -regex = "<.*?::(.*?)/>" - -parts = re.split(regex, state.story) - -outparts = [] - -for i, part in enumerate(parts): - if i % 2 == 1: - # remove ':' - part = part.strip(':') - # use two-part key so that new stories get new text boxes - answer = st.text_input(pos.get(part, part), key=(state.game_number, i)) - - outparts.append(f"**{answer}**" if answer else "") - else: - outparts.append(part) - -if all(outparts) and st.button("generate madlib"): - st.markdown("".join(outparts)) \ No newline at end of file diff --git a/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli/app.py b/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli/app.py deleted file mode 100644 index 
a18a062169db4d4150d42560a2a9622ef65585b0..0000000000000000000000000000000000000000 --- a/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli/app.py +++ /dev/null @@ -1,3 +0,0 @@ -# original: -import gradio as gr -gr.Interface.load("models/sileod/deberta-v3-base-tasksource-nli").launch() diff --git a/spaces/ayaanzaveri/detr/app.py b/spaces/ayaanzaveri/detr/app.py deleted file mode 100644 index 93fc176d4133a99872bc28745bcacc39ccf24b14..0000000000000000000000000000000000000000 --- a/spaces/ayaanzaveri/detr/app.py +++ /dev/null @@ -1,66 +0,0 @@ -from transformers import AutoFeatureExtractor, AutoModelForObjectDetection -import matplotlib.pyplot as plt -import matplotlib.patches as patches -from random import choice -from PIL import Image -import os -from matplotlib import rcParams, font_manager -import streamlit as st -import urllib.request -import requests - -extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50") - -model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50") - -from transformers import pipeline - -pipe = pipeline('object-detection', model=model, feature_extractor=extractor) - -img_url = st.text_input('Image URL', 'https://images.unsplash.com/photo-1556911220-bff31c812dba?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=2468&q=80') - -st.caption('Downloading Image...') - -img_data = requests.get(img_url).content -with open('detect.jpg', 'wb') as handler: - handler.write(img_data) - -st.caption('Running Detection...') - -output = pipe(img_url) - -st.caption('Adding Predictions to Image...') - -fpath = "Poppins-SemiBold.ttf" -prop = font_manager.FontProperties(fname=fpath) - -img = Image.open('detect.jpg') -plt.figure(dpi=2400) - -# Create figure and axes -fig, ax = plt.subplots() - -# Display the image -ax.imshow(img) - -colors = ["#ef4444", "#f97316", "#eab308", "#84cc16", "#06b6d4", "#6366f1"] - -# Create a Rectangle patch -for prediction in output: - selected_color = choice(colors) - x, y, w, h = prediction['box']['xmin'], prediction['box']['ymin'], prediction['box']['xmax'] - prediction['box']['xmin'], prediction['box']['ymax'] - prediction['box']['ymin'] - rect = patches.FancyBboxPatch((x, y), w, h, linewidth=1.25, edgecolor=selected_color, facecolor='none', boxstyle="round,pad=-0.0040,rounding_size=10",) - ax.add_patch(rect) - plt.text(x, y-25, f"{prediction['label']}: {round(prediction['score']*100, 1)}%", fontsize=5, color=selected_color, fontproperties=prop) - -plt.axis('off') - -plt.savefig('detect-bbox.jpg', dpi=1200, bbox_inches='tight') - -image = Image.open('detect-bbox.jpg') - -st.image(image, caption='DETR Image') - -plt.show() - -st.caption('Done!') \ No newline at end of file diff --git a/spaces/ayushnoori/program-synthesis/strings.py b/spaces/ayushnoori/program-synthesis/strings.py deleted file mode 100644 index 23de68fbfb15f8be8d38a888fd62b3d862c0b5b1..0000000000000000000000000000000000000000 --- a/spaces/ayushnoori/program-synthesis/strings.py +++ /dev/null @@ -1,112 +0,0 @@ -''' -STRING OPERATORS -This file contains Python classes that define the string operators for program synthesis. -''' - -''' -CLASS DEFINITIONS -''' - -class StringVariable: - ''' - Class to represent an string variable. Note that position is the position of the variable in the input. - For example, if the input is ["a", "b", "c"] and the variable is the third element (i.e., "c"), then position = 2. 
- ''' - def __init__(self, position): - self.position = position # zero-indexed position of the variable in the arguments to program - self.type = str # type of the variable - self.weight = 1 # weight of the variable - - def evaluate(self, input = None): - - # check that input is not None - if input is None: - raise ValueError("Input is None.") - - # check that input is a list - if type(input) != list: - raise ValueError("Input is not a list.") - - # check that input is not empty - if len(input) == 0: - raise ValueError("Input is empty.") - - # check that position is valid - if self.position >= len(input): - raise ValueError(f"Position {self.position} is out of range for input of length {len(input)}.") - - return input[self.position] - - def str(self): - return f"x{self.position}" - -class StringConstant: - ''' - Class to represent an string constant. - ''' - def __init__(self, value): - self.value = value # value of the constant - self.type = str # type of the constant - self.weight = 1 # weight of the constant - - def evaluate(self, input = None): - return self.value - - def str(self): - return str(self.value) - -class Concatenate: - ''' - Operator to concatenate two string values. - ''' - def __init__(self): - self.arity = 2 # number of arguments - self.arg_types = [str, str] # argument types - self.return_type = str # return type - self.weight = 1 # weight - - def evaluate(self, x, y, input = None): - return x + y - - def str(self, x, y): - return f"Concat({x}, {y})" - -class Left: - ''' - Operator to get left substring. - ''' - def __init__(self): - self.arity = 2 # number of arguments - self.arg_types = [str, int] # argument types - self.return_type = str # return type - self.weight = 1 # weight - - def evaluate(self, x, y, input = None): - return x[:y] - - def str(self, x, y): - return f"Left({x}, {y})" - -class Right: - ''' - Operator to get right substring. 
- ''' - def __init__(self): - self.arity = 2 # number of arguments - self.arg_types = [str, int] # argument types - self.return_type = str # return type - self.weight = 1 # weight - - def evaluate(self, x, y, input = None): - return x[(y * -1):] - - def str(self, x, y): - return f"Right({x}, {y})" - - -''' -GLOBAL CONSTANTS -''' - -# define operators -string_operators = [Concatenate(), Left(), Right()] \ No newline at end of file diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/effects/ParallaxBarrierEffect.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/effects/ParallaxBarrierEffect.js deleted file mode 100644 index 4bdfc7d887f9d3ba7592f8591a1056d4d107b92e..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/effects/ParallaxBarrierEffect.js +++ /dev/null @@ -1,103 +0,0 @@ -/** - * @author mrdoob / http://mrdoob.com/ - * @author marklundin / http://mark-lundin.com/ - * @author alteredq / http://alteredqualia.com/ - */ - -THREE.ParallaxBarrierEffect = function ( renderer ) { - - var _camera = new THREE.OrthographicCamera( - 1, 1, 1, - 1, 0, 1 ); - - var _scene = new THREE.Scene(); - - var _stereo = new THREE.StereoCamera(); - - var _params = { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter, format: THREE.RGBAFormat }; - - var _renderTargetL = new THREE.WebGLRenderTarget( 512, 512, _params ); - var _renderTargetR = new THREE.WebGLRenderTarget( 512, 512, _params ); - - var _material = new THREE.ShaderMaterial( { - - uniforms: { - - "mapLeft": { value: _renderTargetL.texture }, - "mapRight": { value: _renderTargetR.texture } - - }, - - vertexShader: [ - - "varying vec2 vUv;", - - "void main() {", - - " vUv = vec2( uv.x, uv.y );", - " gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );", - - "}" - - ].join( "\n" ), - - fragmentShader: [ - - "uniform sampler2D mapLeft;", - "uniform sampler2D mapRight;", - "varying vec2 vUv;", - - "void main() {", - - " vec2 uv = vUv;", - - " if ( ( mod( gl_FragCoord.y, 2.0 ) ) > 1.00 ) {", - - " gl_FragColor = texture2D( mapLeft, uv );", - - " } else {", - - " gl_FragColor = texture2D( mapRight, uv );", - - " }", - - "}" - - ].join( "\n" ) - - } ); - - var mesh = new THREE.Mesh( new THREE.PlaneBufferGeometry( 2, 2 ), _material ); - _scene.add( mesh ); - - this.setSize = function ( width, height ) { - - renderer.setSize( width, height ); - - var pixelRatio = renderer.getPixelRatio(); - - _renderTargetL.setSize( width * pixelRatio, height * pixelRatio ); - _renderTargetR.setSize( width * pixelRatio, height * pixelRatio ); - - }; - - this.render = function ( scene, camera ) { - - scene.updateMatrixWorld(); - - if ( camera.parent === null ) camera.updateMatrixWorld(); - - _stereo.update( camera ); - - renderer.setRenderTarget( _renderTargetL ); - renderer.clear(); - renderer.render( scene, _stereo.cameraL ); - - renderer.setRenderTarget( _renderTargetR ); - renderer.clear(); - renderer.render( scene, _stereo.cameraR ); - - renderer.setRenderTarget( null ); - renderer.render( _scene, _camera ); - - }; - -}; diff --git a/spaces/bankholdup/stylegan_petbreeder/e4e/editings/latent_editor.py b/spaces/bankholdup/stylegan_petbreeder/e4e/editings/latent_editor.py deleted file mode 100644 index 4bebca2f5c86f71b58fa1f30d24bfcb0da06d88f..0000000000000000000000000000000000000000 --- a/spaces/bankholdup/stylegan_petbreeder/e4e/editings/latent_editor.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch -import sys -sys.path.append(".") 
-sys.path.append("..") -from editings import ganspace, sefa -from utils.common import tensor2im - - -class LatentEditor(object): - def __init__(self, stylegan_generator, is_cars=False): - self.generator = stylegan_generator - self.is_cars = is_cars # Since the cars StyleGAN output is 384x512, there is a need to crop the 512x512 output. - - def apply_ganspace(self, latent, ganspace_pca, edit_directions): - edit_latents = ganspace.edit(latent, ganspace_pca, edit_directions) - return self._latents_to_image(edit_latents) - - def apply_interfacegan(self, latent, direction, factor=1, factor_range=None): - edit_latents = [] - if factor_range is not None: # Apply a range of editing factors. for example, (-5, 5) - for f in range(*factor_range): - edit_latent = latent + f * direction - edit_latents.append(edit_latent) - edit_latents = torch.cat(edit_latents) - else: - edit_latents = latent + factor * direction - return self._latents_to_image(edit_latents) - - def apply_sefa(self, latent, indices=[2, 3, 4, 5], **kwargs): - edit_latents = sefa.edit(self.generator, latent, indices, **kwargs) - return self._latents_to_image(edit_latents) - - # Currently, in order to apply StyleFlow editings, one should run inference, - # save the latent codes and load them form the official StyleFlow repository. - # def apply_styleflow(self): - # pass - - def _latents_to_image(self, latents): - with torch.no_grad(): - images, _ = self.generator([latents], randomize_noise=False, input_is_latent=True) - if self.is_cars: - images = images[:, :, 64:448, :] # 512x512 -> 384x512 - horizontal_concat_image = torch.cat(list(images), 2) - final_image = tensor2im(horizontal_concat_image) - return final_image diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_ffhq_degradation_dataset.py b/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_ffhq_degradation_dataset.py deleted file mode 100644 index fa56c03fb8e23df26aa6ed8442a86b3c676eec78..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_ffhq_degradation_dataset.py +++ /dev/null @@ -1,96 +0,0 @@ -import pytest -import yaml - -from gfpgan.data.ffhq_degradation_dataset import FFHQDegradationDataset - - -def test_ffhq_degradation_dataset(): - - with open('tests/data/test_ffhq_degradation_dataset.yml', mode='r') as f: - opt = yaml.load(f, Loader=yaml.FullLoader) - - dataset = FFHQDegradationDataset(opt) - assert dataset.io_backend_opt['type'] == 'disk' # io backend - assert len(dataset) == 1 # whether to read correct meta info - assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization the degradation configurations - assert dataset.color_jitter_prob == 1 - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 512, 512) - assert result['lq'].shape == (3, 512, 512) - assert result['gt_path'] == 'tests/data/gt/00000000.png' - - # ------------------ test with probability = 0 -------------------- # - opt['color_jitter_prob'] = 0 - opt['color_jitter_pt_prob'] = 0 - opt['gray_prob'] = 0 - opt['io_backend'] = dict(type='disk') - dataset = FFHQDegradationDataset(opt) - assert dataset.io_backend_opt['type'] == 'disk' # io backend - assert len(dataset) == 1 # whether to read correct meta info - assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization the degradation configurations - assert dataset.color_jitter_prob == 0 - - # 
test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 512, 512) - assert result['lq'].shape == (3, 512, 512) - assert result['gt_path'] == 'tests/data/gt/00000000.png' - - # ------------------ test lmdb backend -------------------- # - opt['dataroot_gt'] = 'tests/data/ffhq_gt.lmdb' - opt['io_backend'] = dict(type='lmdb') - - dataset = FFHQDegradationDataset(opt) - assert dataset.io_backend_opt['type'] == 'lmdb' # io backend - assert len(dataset) == 1 # whether to read correct meta info - assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization the degradation configurations - assert dataset.color_jitter_prob == 0 - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 512, 512) - assert result['lq'].shape == (3, 512, 512) - assert result['gt_path'] == '00000000' - - # ------------------ test with crop_components -------------------- # - opt['crop_components'] = True - opt['component_path'] = 'tests/data/test_eye_mouth_landmarks.pth' - opt['eye_enlarge_ratio'] = 1.4 - opt['gt_gray'] = True - opt['io_backend'] = dict(type='lmdb') - - dataset = FFHQDegradationDataset(opt) - assert dataset.crop_components is True - - # test __getitem__ - result = dataset.__getitem__(0) - # check returned keys - expected_keys = ['gt', 'lq', 'gt_path', 'loc_left_eye', 'loc_right_eye', 'loc_mouth'] - assert set(expected_keys).issubset(set(result.keys())) - # check shape and contents - assert result['gt'].shape == (3, 512, 512) - assert result['lq'].shape == (3, 512, 512) - assert result['gt_path'] == '00000000' - assert result['loc_left_eye'].shape == (4, ) - assert result['loc_right_eye'].shape == (4, ) - assert result['loc_mouth'].shape == (4, ) - - # ------------------ lmdb backend should have paths ends with lmdb -------------------- # - with pytest.raises(ValueError): - opt['dataroot_gt'] = 'tests/data/gt' - opt['io_backend'] = dict(type='lmdb') - dataset = FFHQDegradationDataset(opt) diff --git a/spaces/bigPear/digitalWDF/examples/quantized_finetune_with_local_model.sh b/spaces/bigPear/digitalWDF/examples/quantized_finetune_with_local_model.sh deleted file mode 100644 index 209e77797ee4048996704589956f922909ec51b4..0000000000000000000000000000000000000000 --- a/spaces/bigPear/digitalWDF/examples/quantized_finetune_with_local_model.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash - -CUDA_VISIBLE_DEVICES=0 python ../src/finetune.py \ - --do_train \ - --model_name_or_path path_to_chatglm_model \ - --dataset alpaca_gpt4_zh \ - --dataset_dir ../data \ - --finetuning_type lora \ - --output_dir path_to_sft_checkpoint \ - --overwrite_cache \ - --per_device_train_batch_size 4 \ - --gradient_accumulation_steps 4 \ - --lr_scheduler_type cosine \ - --logging_steps 10 \ - --save_steps 1000 \ - --learning_rate 5e-5 \ - --num_train_epochs 3.0 \ - --quantization_bit 8 \ - --fp16 diff --git a/spaces/bioriAsaeru/text-to-voice/Anaganaga O Dheerudu How a 9-Year-Old Girl Saved Her Land from an Evil Queen.md b/spaces/bioriAsaeru/text-to-voice/Anaganaga O Dheerudu How a 9-Year-Old Girl Saved Her Land from an Evil Queen.md deleted file mode 100644 index 9ed91c9b5610fb3e2dea458b971e9dfcf858d909..0000000000000000000000000000000000000000 --- 
a/spaces/bioriAsaeru/text-to-voice/Anaganaga O Dheerudu How a 9-Year-Old Girl Saved Her Land from an Evil Queen.md +++ /dev/null @@ -1,5 +0,0 @@ -
Best biggest database of FREE PORN XXX movies. Start watching FREE HIGH QUALITY HD videos right now. You can watch anaganaga o dheerudu movierulz clip on your favorites from web, iPhone, Android, iPad and other your mobile phones.

anaganaga o dheerudu full movie online

Download Zip ★★★ https://urloso.com/2uyRXu

aaccfb2cb3
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Formatter-2.9.0.4..epub Everything You Need to Know about E-book Formats and Standards.md b/spaces/bioriAsaeru/text-to-voice/Formatter-2.9.0.4..epub Everything You Need to Know about E-book Formats and Standards.md deleted file mode 100644 index 634801426db2746b6ca6ca8330499cd8853586cf..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Formatter-2.9.0.4..epub Everything You Need to Know about E-book Formats and Standards.md +++ /dev/null @@ -1,6 +0,0 @@ -

Formatter-2.9.0.4..epub

Download https://urloso.com/2uyOQK

aaccfb2cb3
    diff --git a/spaces/blmdsydm/faster-whisper-webui/app-network.py b/spaces/blmdsydm/faster-whisper-webui/app-network.py deleted file mode 100644 index 4f0e565b9029761d4b995fe32a65c58d1de55f53..0000000000000000000000000000000000000000 --- a/spaces/blmdsydm/faster-whisper-webui/app-network.py +++ /dev/null @@ -1,5 +0,0 @@ -# Run the app with no audio file restrictions, and make it available on the network -from app import create_ui -from src.config import ApplicationConfig - -create_ui(ApplicationConfig.create_default(input_audio_max_duration=-1, server_name="0.0.0.0")) \ No newline at end of file diff --git a/spaces/bluelu/Product-Photo-Analyzer/process.py b/spaces/bluelu/Product-Photo-Analyzer/process.py deleted file mode 100644 index ff21f44e4ae27a6ac18385335497c6bace72fbab..0000000000000000000000000000000000000000 --- a/spaces/bluelu/Product-Photo-Analyzer/process.py +++ /dev/null @@ -1,32 +0,0 @@ -import csv -import os -from huggingface_hub import Repository -from datetime import datetime -from torchvision.utils import save_image -from datasets import load_dataset, list_datasets -DATASET_REPO_URL = "https://huggingface.co/datasets/bluelu/bgi" -DATA_FILENAME = "bg.csv" -DATA_FILE = os.path.join("data", DATA_FILENAME) -IN1 = os.path.join("data", str(datetime.now().replace(microsecond=0)) + '_0.png') -IN2 = os.path.join("data", str(datetime.now().replace(microsecond=0)) + '_1.png') -IN3 = os.path.join("data", str(datetime.now().replace(microsecond=0)) + '_2.png') -HF_TOKEN = os.environ.get("S2") - - -def fiximg(imgs, mode): - print(list_datasets()) - repo = Repository( - local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN) - - - save_image(imgs[0], IN1) - save_image(imgs[1], IN2) - save_image(imgs[2], IN3) - if mode is None: - mode = 'default' - - with open(DATA_FILE, 'a') as file: - writer = csv.DictWriter(file, fieldnames=["ime"]) - writer.writerow({"ime": str(datetime.now()) + mode}) - - repo.push_to_hub() \ No newline at end of file diff --git a/spaces/bohmian/stock_intrinsic_value_calculator/app.py b/spaces/bohmian/stock_intrinsic_value_calculator/app.py deleted file mode 100644 index 58fc8c94334ee96e091758a170c2a72b7fdae63c..0000000000000000000000000000000000000000 --- a/spaces/bohmian/stock_intrinsic_value_calculator/app.py +++ /dev/null @@ -1,358 +0,0 @@ -# Importing required modules -import pandas as pd # for manipulating financial statements in dataframes -import numpy as np -import plotly.express as px # for visualizing results in interactive plots - -# To extract and parse fundamental data like beta and growth estimates from finviz website's HTML -import requests -from bs4 import BeautifulSoup as bs - -# For parsing financial statements data from financialmodelingprep api -from urllib.request import urlopen -import json - -def get_jsonparsed_data(url): - response = urlopen(url) - data = response.read().decode("utf-8") - return json.loads(data) - -# For Gradio App -import gradio as gr - - -# To read the environment variable to use in API requests later -import os -apiKey = os.environ['FMP_API_KEY'] # the environment variable is set in HuggingFace Spaces directly - - -############################################################################################################ -###### GET DATA FROM FINANCIAL MODELING PREP -############################################################################################################ - -# Financialmodelingprep api url -base_url = "https://financialmodelingprep.com/api/v3/" - -# get financial statements using 
financial modelling prep API -def get_financial_statements(ticker): - # quarterly cash flow statements for calculating latest trailing twelve months (TTM) free cash flow - columns_drop = ['acceptedDate', 'period', 'symbol', 'reportedCurrency', 'cik', 'fillingDate', 'depreciationAndAmortization', 'link', 'finalLink'] - q_cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey)) - q_cash_flow_statement = q_cash_flow_statement.set_index('date').drop(columns_drop, axis=1).iloc[:4] # extract for last 4 quarters - latest_year = int(q_cash_flow_statement.iloc[0]['calendarYear']) - - # annual cash flow statements - cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?apikey=' + apiKey)) - cash_flow_statement = cash_flow_statement.set_index('date').drop(columns_drop, axis=1) - - # combine annual and latest TTM cash flow statements - ttm_cash_flow_statement = q_cash_flow_statement.sum() # sum up last 4 quarters to get TTM cash flow - cash_flow_statement = cash_flow_statement[::-1].append(ttm_cash_flow_statement.rename('TTM')).drop(['netIncome'], axis=1) - final_cash_flow_statement = cash_flow_statement[::-1] # reverse list to show most recent ones first - - # quarterly balance sheet statements - columns_drop = ['acceptedDate', 'calendarYear', 'period', 'symbol', 'reportedCurrency', 'cik', 'fillingDate', 'link', 'finalLink'] - q_balance_statement = pd.DataFrame(get_jsonparsed_data(base_url+'balance-sheet-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey)) - q_balance_statement = q_balance_statement.set_index('date').drop(columns_drop, axis=1) - q_balance_statement = q_balance_statement.apply(pd.to_numeric, errors='coerce') - - return q_cash_flow_statement, cash_flow_statement, final_cash_flow_statement, q_balance_statement, latest_year - - -# check stability of cash flows -def plot_cash_flow(ticker, cash_flow_statement): - # DCF model works best only if the free cash flows are POSITIVE, STABLE and STEADILY INCREASING. - # So let's plot the graph and verify if this is the case. - fig_cash_flow = px.bar(cash_flow_statement , y='freeCashFlow', title=ticker + ' Free Cash Flows') - fig_cash_flow.update_xaxes(type='category', tickangle=270, title='Date') - fig_cash_flow.update_yaxes(title='Free Cash Flows') - #fig_cash_flow.show() - return fig_cash_flow - - -# get ttm cash flow, most recent total debt and cash & short term investment data from statements -def get_statements_data(final_cash_flow_statement, q_balance_statement): - cash_flow = final_cash_flow_statement.iloc[0]['freeCashFlow'] # ttm cash flow - total_debt = q_balance_statement.iloc[0]['totalDebt'] - cash_and_ST_investments = q_balance_statement.iloc[0]['cashAndShortTermInvestments'] - return cash_flow, total_debt, cash_and_ST_investments - - -############################################################################################################ -###### GET DATA FROM FINVIZ WEBSITE -############################################################################################################ - -# Price, EPS next Y/5Y, Beta, Number of Shares Outstanding -# Extract (using requests.get) and Parse (using Beautiful Soup) data from Finviz table in the Finviz website (see screenshot above), needed to calculate intrinsic value of stock. 
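# --------------------------------------------------------------------------------------------
# Editor's note: the toy sketch below is NOT part of the original app. It only illustrates how
# the Finviz metrics above (growth estimates, beta-derived discount rate, shares outstanding)
# feed the discounted-cash-flow steps implemented further down in calculate_intrinsic_value().
# Every number here is a made-up assumption, not data for any real company.
def _toy_dcf_sketch():
    cash_flow = 100.0                        # assumed TTM free cash flow, in $ millions
    growth_1_5, growth_6_10 = 0.10, 0.05     # assumed "EPS next 5Y" and half of it for years 6-10
    long_term_growth, discount = 0.03, 0.07  # assumed perpetual growth and beta-based discount rate
    total_debt, cash_st, shares = 200.0, 150.0, 50.0  # assumed debt, cash + ST investments, share count

    discounted = []
    for year in range(1, 11):
        cash_flow *= 1 + (growth_1_5 if year <= 5 else growth_6_10)
        discounted.append(cash_flow / (1 + discount) ** year)

    # Growth-in-perpetuity terminal value, using the discounted year-10 cash flow as the base
    terminal = discounted[-1] * (1 + long_term_growth) / (discount - long_term_growth)
    return (sum(discounted) + terminal - total_debt + cash_st) / shares  # intrinsic value per share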
- -# List of data we want to extract from Finviz Table -# Price is the current stock price -# EPS next Y is the estimated earnings growth for next year -# EPS next 5Y is the estimated earnings growth for next 5 years (if this is not present on finviz, we will use EPS next Y instead) -# Beta captures the volatility of the stock, used for estimating discount rate later -# Shs Outstand is the number of shares present in the market -metric = ['Price', 'EPS next Y', 'EPS next 5Y', 'Beta', 'Shs Outstand'] - -def fundamental_metric(soup, metric): - # the table which stores the data in Finviz has html table attribute class of 'snapshot-td2' - return soup.find_all(text = metric)[-1].find_next(class_='snapshot-td2').text - -# get above metrics from finviz and store as a dict -def get_finviz_data(ticker): - try: - url = ("http://finviz.com/quote.ashx?t=" + ticker.lower()) - soup = bs(requests.get(url,headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0'}).content) - dict_finviz = {} - for m in metric: - dict_finviz[m] = fundamental_metric(soup,m) - for key, value in dict_finviz.items(): - # replace percentages - if (value[-1]=='%'): - dict_finviz[key] = value[:-1] - dict_finviz[key] = float(dict_finviz[key]) - # billion - if (value[-1]=='B'): - dict_finviz[key] = value[:-1] - dict_finviz[key] = float(dict_finviz[key])*1000000000 - # million - if (value[-1]=='M'): - dict_finviz[key] = value[:-1] - dict_finviz[key] = float(dict_finviz[key])*1000000 - try: - dict_finviz[key] = float(dict_finviz[key]) - except: - pass - except Exception as e: - print (e) - print ('Not successful parsing ' + ticker + ' data.') - return dict_finviz - - -def parse_finviz_dict(finviz_dict): - EPS_growth_5Y = finviz_dict['EPS next 5Y'] - # sometimes EPS next 5Y is empty and shows as a '-' string, in this case use EPS next Y - if isinstance(EPS_growth_5Y, str): - if not EPS_growth_5Y.isdigit(): - EPS_growth_5Y = finviz_dict['EPS next Y'] - EPS_growth_6Y_to_10Y = EPS_growth_5Y/2 # Half the previous growth rate, conservative estimate - # Long term = previous growth rate or around long term inflation rate, whichever is lower to be conservative estimate - long_term_growth_rate = np.minimum(EPS_growth_6Y_to_10Y, 3) - shares_outstanding = finviz_dict['Shs Outstand'] - beta = finviz_dict['Beta'] - current_price = finviz_dict['Price'] - - return EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, beta, shares_outstanding, current_price - - -## Estimate Discount Rate from Beta -def estimate_discount_rate(beta): - # Beta shows the volatility of the stock, - # the higher the beta, we want to be more conservative by increasing the discount rate also. - discount_rate = 7 - if(beta<0.80): - discount_rate = 5 - elif(beta>=0.80 and beta<1): - discount_rate = 6 - elif(beta>=1 and beta<1.1): - discount_rate = 6.5 - elif(beta>=1.1 and beta<1.2): - discount_rate = 7 - elif(beta>=1.2 and beta<1.3): - discount_rate = 7.5 - elif(beta>=1.3 and beta<1.4): - discount_rate = 8 - elif(beta>=1.4 and beta<1.6): - discount_rate = 8.5 - elif(beta>=1.61): - discount_rate = 9 - - return discount_rate - - -############################################################################################################ -## Calculate Intrinsic Value -############################################################################################################ - -# 1. First Project Cash Flows from Year 1 to Year 10 using Present (TTM) Free Cash Flow -# 2. Discount the Cash Flows to Present Value -# 3. 
Calculate the Terminal Value after Year 10 (Discounted to Present Value) Assuming the Company will Grow at a Constant Steady Rate Forever (https://corporatefinanceinstitute.com/resources/financial-modeling/dcf-terminal-value-formula/) -# 4. Add the Cash Flows and the Terminal Value Up -# 5. Then Account for the Cash + Short Term Investments and Subtract Total Debt -# 6. Divide by Total Number of Shares Outstanding - -def calculate_intrinsic_value(latest_year, cash_flow, total_debt, cash_and_ST_investments, - EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, - shares_outstanding, discount_rate, current_price): - - # Convert all percentages to decmials - EPS_growth_5Y_d = EPS_growth_5Y/100 - EPS_growth_6Y_to_10Y_d = EPS_growth_6Y_to_10Y/100 - long_term_growth_rate_d = long_term_growth_rate/100 - discount_rate_d = discount_rate/100 - # print("Discounted Cash Flows\n") - - # Lists of projected cash flows from year 1 to year 20 - cash_flow_list = [] - cash_flow_discounted_list = [] - year_list = [] - - # 1. First Project Cash Flows from Year 1 to Year 10 using Present (TTM) Free Cash Flow - # 2. Discount Each of the Cash Flows to Present Value - # Years 1 to 5 - for year in range(1, 6): - year_list.append(year + latest_year) - cash_flow*=(1 + EPS_growth_5Y_d) - cash_flow_list.append(cash_flow) - cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year) - cash_flow_discounted_list.append(cash_flow_discounted) - # print("Year " + str(year + latest_year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows - - # Years 6 to 10 - for year in range(6, 11): - year_list.append(year + latest_year) - cash_flow*=(1 + EPS_growth_6Y_to_10Y_d) - cash_flow_list.append(cash_flow) - cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year) - cash_flow_discounted_list.append(cash_flow_discounted) - # print("Year " + str(year + latest_year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows - - # Store all forecasted cash flows in dataframe - forecast_cash_flows_df = pd.DataFrame.from_dict({'Year': year_list, 'Cash Flow': cash_flow_list, 'Discounted Cash Flow': cash_flow_discounted_list}) - forecast_cash_flows_df = forecast_cash_flows_df.set_index('Year') - - # 3. Calculate the Terminal Value after Year 10 (Discounted to Present Value) - # These are All Future Cash Flows Summed Up - # Assuming the Company will Grow at a Constant Steady Rate Forever (https://corporatefinanceinstitute.com/resources/financial-modeling/dcf-terminal-value-formula/) - # Growth in Perpuity Approach - cashflow_10Y = cash_flow_discounted_list[-1] - # Formula to Calculate: https://corporatefinanceinstitute.com/resources/financial-modeling/dcf-terminal-value-formula/ - terminal_value = cashflow_10Y*(1+long_term_growth_rate_d)/(discount_rate_d-long_term_growth_rate_d) - - # 4. Add the Cash Flows and the Terminal Value Up - # 5. Then Account for the Cash + Short Term Investments and Subtract Total Debt - # 6. 
Divide by Total Number of Shares Outstanding - intrinsic_value = (sum(cash_flow_discounted_list) + terminal_value - total_debt + cash_and_ST_investments)/shares_outstanding - margin_of_safety = (1-current_price/intrinsic_value)*100 - - return forecast_cash_flows_df, terminal_value, intrinsic_value, margin_of_safety - - -# Plot forecasted cash flows from years 1 to 10, as well as the discounted cash flows -def plot_forecasted_cash_flows(ticker, forecast_cash_flows_df): - - fig_cash_forecast = px.bar(forecast_cash_flows_df, barmode='group', title=ticker + ' Projected Free Cash Flows') - fig_cash_forecast.update_xaxes(type='category', tickangle=270) - fig_cash_forecast.update_xaxes(tickangle=270, title='Forecasted Year') - fig_cash_forecast.update_yaxes(title='Free Cash Flows') - # fig_cash_forecast.show() - - return fig_cash_forecast - - -# chain all the steps from the functions above together -def run_all_steps(ticker): - ticker = ticker.upper() # make sure ticker is caps - - q_cash_flow_statement, cash_flow_statement, final_cash_flow_statement, q_balance_statement, latest_year = get_financial_statements(ticker) - - fig_cash_flow = plot_cash_flow(ticker, cash_flow_statement) - - cash_flow, total_debt, cash_and_ST_investments = get_statements_data(final_cash_flow_statement, q_balance_statement) - - finviz_dict = get_finviz_data(ticker) - - EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, beta, shares_outstanding, current_price = parse_finviz_dict(finviz_dict) - - discount_rate = estimate_discount_rate(beta) - - forecast_cash_flows_df, terminal_value, intrinsic_value, margin_of_safety = calculate_intrinsic_value(latest_year, cash_flow, total_debt, cash_and_ST_investments, - EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, - shares_outstanding, discount_rate, current_price) - - fig_cash_forecast = plot_forecasted_cash_flows(ticker, forecast_cash_flows_df) - - return q_cash_flow_statement.reset_index(), final_cash_flow_statement.reset_index(), q_balance_statement.reset_index(), fig_cash_flow, \ - str(EPS_growth_5Y) + '%', str(EPS_growth_6Y_to_10Y) + '%', str(long_term_growth_rate) + '%', \ - beta, shares_outstanding, current_price, \ - str(discount_rate) + '%', forecast_cash_flows_df.reset_index(), terminal_value, intrinsic_value, fig_cash_forecast, str(margin_of_safety) + '%' - - -# Gradio App and UI -with gr.Blocks() as app: - with gr.Row(): - gr.HTML("

Bohmian's Stock Intrinsic Value Calculator
    ") - - with gr.Row(): - ticker = gr.Textbox("AAPL", label='Enter stock ticker to calculate its intrinsic value e.g. "AAPL"') - btn = gr.Button("Calculate Intrinsic Value") - - # Show intrinsic value calculation results - with gr.Row(): - gr.HTML("

Calculated Intrinsic Value
    ") - - with gr.Row(): - intrinsic_value = gr.Text(label="Intrinsic Value (if this value is negative, it means current cash flow may be negative and this model WOULD NOT WORK, scroll down to check)") - current_price = gr.Text(label="Actual Stock Price") - margin_of_safety = gr.Text(label="Margin of Safety") - - # Show metrics obtained and estimated from FinViz website that were essential for calculations - with gr.Row(): - gr.HTML("

Metrics Obtained (and Estimated) from FinViz Website
    ") - with gr.Row(): - gr.HTML("

https://finviz.com/
    ") - - with gr.Row(): - EPS_growth_5Y = gr.Text(label="EPS Next 5Y (estimated EPS growth for next 5 years)") - EPS_growth_6Y_to_10Y = gr.Text(label="EPS growth for 6th to 10th year (estimated as half of 5Y rate)") - long_term_growth_rate = gr.Text(label="Long Term Growth Rate (estimated as the above or 3%, whichever is lower)") - - with gr.Row(): - beta = gr.Text(label="Beta (measures volatility of stock)") - discount_rate = gr.Text(label="Discount Rate (estimated from beta)") - shares_outstanding = gr.Text(label="Shares Outstanding") - - - # Show detailed actual historical financial statements - with gr.Row(): - gr.HTML("

Actual Historical Financial Statements Data from Financial Modelling Prep API
    ") - with gr.Row(): - gr.HTML("

https://site.financialmodelingprep.com/developer
    ") - with gr.Row(): - gr.HTML("

IMPORTANT NOTE: DCF model works best only if the free cash flows are POSITIVE, STABLE and STEADILY INCREASING. Check if this is the case.
    ") - - with gr.Row(): - fig_cash_flow = gr.Plot(label="Historical Cash Flows") - - with gr.Row(): - q_cash_flow_statement = gr.DataFrame(label="Last 4 Quarterly Cash Flow Statements") - - with gr.Row(): - final_cash_flow_statement = gr.DataFrame(label="TTM + Annual Cash Flow Statements") - - with gr.Row(): - q_balance_statement = gr.DataFrame(label="Quarterly Balance Statements") - - - # Show forecasted cash flows and terminal value - with gr.Row(): - gr.HTML("

Forecasted Cash Flows for Next 10 Years
    ") - - with gr.Row(): - fig_cash_forecast = gr.Plot(label="Forecasted Cash Flows") - - with gr.Row(): - forecast_cash_flows_df = gr.DataFrame(label="Forecasted Cash Flows") - - with gr.Row(): - terminal_value = gr.Text(label="Terminal Value (after 10th year)") - - btn.click(fn=run_all_steps, inputs=[ticker], - outputs=[q_cash_flow_statement, final_cash_flow_statement, q_balance_statement, fig_cash_flow, \ - EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, beta, shares_outstanding, current_price, \ - discount_rate, forecast_cash_flows_df, terminal_value, intrinsic_value, fig_cash_forecast, margin_of_safety]) - - ticker.submit(fn=run_all_steps, inputs=[ticker], - outputs=[q_cash_flow_statement, final_cash_flow_statement, q_balance_statement, fig_cash_flow, \ - EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, beta, shares_outstanding, current_price, \ - discount_rate, forecast_cash_flows_df, terminal_value, intrinsic_value, fig_cash_forecast, margin_of_safety]) - -app.launch() \ No newline at end of file diff --git a/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h b/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h deleted file mode 100644 index b5c144a4a58649906c9c87a40044b5118a00aa04..0000000000000000000000000000000000000000 --- a/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h +++ /dev/null @@ -1,133 +0,0 @@ -#pragma once - -#include -#include "masked_image.h" - -class PatchDistanceMetric { -public: - PatchDistanceMetric(int patch_size) : m_patch_size(patch_size) {} - virtual ~PatchDistanceMetric() = default; - - inline int patch_size() const { return m_patch_size; } - virtual int operator()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const = 0; - static const int kDistanceScale; - -protected: - int m_patch_size; -}; - -class NearestNeighborField { -public: - NearestNeighborField() : m_source(), m_target(), m_field(), m_distance_metric(nullptr) { - // pass - } - NearestNeighborField(const MaskedImage &source, const MaskedImage &target, const PatchDistanceMetric *metric, int max_retry = 20) - : m_source(source), m_target(target), m_distance_metric(metric) { - m_field = cv::Mat(m_source.size(), CV_32SC3); - _randomize_field(max_retry); - } - NearestNeighborField(const MaskedImage &source, const MaskedImage &target, const PatchDistanceMetric *metric, const NearestNeighborField &other, int max_retry = 20) - : m_source(source), m_target(target), m_distance_metric(metric) { - m_field = cv::Mat(m_source.size(), CV_32SC3); - _initialize_field_from(other, max_retry); - } - - const MaskedImage &source() const { - return m_source; - } - const MaskedImage &target() const { - return m_target; - } - inline cv::Size source_size() const { - return m_source.size(); - } - inline cv::Size target_size() const { - return m_target.size(); - } - inline void set_source(const MaskedImage &source) { - m_source = source; - } - inline void set_target(const MaskedImage &target) { - m_target = target; - } - - inline int *mutable_ptr(int y, int x) { - return m_field.ptr(y, x); - } - inline const int *ptr(int y, int x) const { - return m_field.ptr(y, x); - } - - inline int at(int y, int x, int c) const { - return m_field.ptr(y, x)[c]; - } - inline int &at(int y, int x, int c) { - return m_field.ptr(y, x)[c]; - } - inline void set_identity(int y, int x) { - auto ptr = mutable_ptr(y, x); - ptr[0] = y, ptr[1] = x, ptr[2] = 0; - } - - void minimize(int nr_pass); - -private: - inline 
int _distance(int source_y, int source_x, int target_y, int target_x) { - return (*m_distance_metric)(m_source, source_y, source_x, m_target, target_y, target_x); - } - - void _randomize_field(int max_retry = 20, bool reset = true); - void _initialize_field_from(const NearestNeighborField &other, int max_retry); - void _minimize_link(int y, int x, int direction); - - MaskedImage m_source; - MaskedImage m_target; - cv::Mat m_field; // { y_target, x_target, distance_scaled } - const PatchDistanceMetric *m_distance_metric; -}; - - -class PatchSSDDistanceMetric : public PatchDistanceMetric { -public: - using PatchDistanceMetric::PatchDistanceMetric; - virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const; - static const int kSSDScale; -}; - -class DebugPatchSSDDistanceMetric : public PatchDistanceMetric { -public: - DebugPatchSSDDistanceMetric(int patch_size, int width, int height) : PatchDistanceMetric(patch_size), m_width(width), m_height(height) {} - virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const; -protected: - int m_width, m_height; -}; - -class RegularityGuidedPatchDistanceMetricV1 : public PatchDistanceMetric { -public: - RegularityGuidedPatchDistanceMetricV1(int patch_size, double dx1, double dy1, double dx2, double dy2, double weight) - : PatchDistanceMetric(patch_size), m_dx1(dx1), m_dy1(dy1), m_dx2(dx2), m_dy2(dy2), m_weight(weight) { - - assert(m_dy1 == 0); - assert(m_dx2 == 0); - m_scale = sqrt(m_dx1 * m_dx1 + m_dy2 * m_dy2) / 4; - } - virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const; - -protected: - double m_dx1, m_dy1, m_dx2, m_dy2; - double m_scale, m_weight; -}; - -class RegularityGuidedPatchDistanceMetricV2 : public PatchDistanceMetric { -public: - RegularityGuidedPatchDistanceMetricV2(int patch_size, cv::Mat ijmap, double weight) - : PatchDistanceMetric(patch_size), m_ijmap(ijmap), m_weight(weight) { - - } - virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const; - -protected: - cv::Mat m_ijmap; - double m_width, m_height, m_weight; -}; - diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py deleted file mode 100644 index 78ce7c6cb02fa01f6319d088349ff4f422001839..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import math -from typing import Any, List -import torch -from torch import nn -from torch.nn import functional as F - -from detectron2.config import CfgNode -from detectron2.structures import Instances - -from .. 
import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType -from .chart import DensePoseChartLoss -from .registry import DENSEPOSE_LOSS_REGISTRY -from .utils import BilinearInterpolationHelper, LossDict - - -@DENSEPOSE_LOSS_REGISTRY.register() -class DensePoseChartWithConfidenceLoss(DensePoseChartLoss): - """ """ - - def __init__(self, cfg: CfgNode): - super().__init__(cfg) - self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg) - if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO: - self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss( - self.confidence_model_cfg.uv_confidence.epsilon - ) - elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO: - self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss( - self.confidence_model_cfg.uv_confidence.epsilon - ) - - def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict: - """ - Overrides fake losses for fine segmentation and U/V coordinates to - include computation graphs for additional confidence parameters. - These are used when no suitable ground truth data was found in a batch. - The loss has a value 0 and is primarily used to construct the computation graph, - so that `DistributedDataParallel` has similar graphs on all GPUs and can - perform reduction properly. - - Args: - densepose_predictor_outputs: DensePose predictor outputs, an object - of a dataclass that is assumed to have the following attributes: - * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S] - * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S] - * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S] - Return: - dict: str -> tensor: dict of losses with the following entries: - * `loss_densepose_U`: has value 0 - * `loss_densepose_V`: has value 0 - * `loss_densepose_I`: has value 0 - """ - conf_type = self.confidence_model_cfg.uv_confidence.type - if self.confidence_model_cfg.uv_confidence.enabled: - loss_uv = ( - densepose_predictor_outputs.u.sum() + densepose_predictor_outputs.v.sum() - ) * 0 - if conf_type == DensePoseUVConfidenceType.IID_ISO: - loss_uv += densepose_predictor_outputs.sigma_2.sum() * 0 - elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO: - loss_uv += ( - densepose_predictor_outputs.sigma_2.sum() - + densepose_predictor_outputs.kappa_u.sum() - + densepose_predictor_outputs.kappa_v.sum() - ) * 0 - return {"loss_densepose_UV": loss_uv} - else: - return super().produce_fake_densepose_losses_uv(densepose_predictor_outputs) - - def produce_densepose_losses_uv( - self, - proposals_with_gt: List[Instances], - densepose_predictor_outputs: Any, - packed_annotations: Any, - interpolator: BilinearInterpolationHelper, - j_valid_fg: torch.Tensor, - ) -> LossDict: - conf_type = self.confidence_model_cfg.uv_confidence.type - if self.confidence_model_cfg.uv_confidence.enabled: - u_gt = packed_annotations.u_gt[j_valid_fg] - u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg] - v_gt = packed_annotations.v_gt[j_valid_fg] - v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg] - sigma_2_est = interpolator.extract_at_points(densepose_predictor_outputs.sigma_2)[ - j_valid_fg - ] - if conf_type == DensePoseUVConfidenceType.IID_ISO: - return { - "loss_densepose_UV": ( - self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt) - * self.w_points - ) - } - elif conf_type in 
[DensePoseUVConfidenceType.INDEP_ANISO]: - kappa_u_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_u)[ - j_valid_fg - ] - kappa_v_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_v)[ - j_valid_fg - ] - return { - "loss_densepose_UV": ( - self.uv_loss_with_confidences( - u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt - ) - * self.w_points - ) - } - return super().produce_densepose_losses_uv( - proposals_with_gt, - densepose_predictor_outputs, - packed_annotations, - interpolator, - j_valid_fg, - ) - - -class IIDIsotropicGaussianUVLoss(nn.Module): - """ - Loss for the case of iid residuals with isotropic covariance: - $Sigma_i = sigma_i^2 I$ - The loss (negative log likelihood) is then: - $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$, - where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates - difference between estimated and ground truth UV values - For details, see: - N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning - Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 - """ - - def __init__(self, sigma_lower_bound: float): - super(IIDIsotropicGaussianUVLoss, self).__init__() - self.sigma_lower_bound = sigma_lower_bound - self.log2pi = math.log(2 * math.pi) - - def forward( - self, - u: torch.Tensor, - v: torch.Tensor, - sigma_u: torch.Tensor, - target_u: torch.Tensor, - target_v: torch.Tensor, - ): - # compute $\sigma_i^2$ - # use sigma_lower_bound to avoid degenerate solution for variance - # (sigma -> 0) - sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound - # compute \|delta_i\|^2 - # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2 - # the total loss from the formula above: - loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2) - return loss.sum() - - -class IndepAnisotropicGaussianUVLoss(nn.Module): - """ - Loss for the case of independent residuals with anisotropic covariances: - $Sigma_i = sigma_i^2 I + r_i r_i^T$ - The loss (negative log likelihood) is then: - $1/2 sum_{i=1}^n (log(2 pi) - + log sigma_i^2 (sigma_i^2 + ||r_i||^2) - + ||delta_i||^2 / sigma_i^2 - - ^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$, - where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates - difference between estimated and ground truth UV values - For details, see: - N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning - Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019 - """ - - def __init__(self, sigma_lower_bound: float): - super(IndepAnisotropicGaussianUVLoss, self).__init__() - self.sigma_lower_bound = sigma_lower_bound - self.log2pi = math.log(2 * math.pi) - - def forward( - self, - u: torch.Tensor, - v: torch.Tensor, - sigma_u: torch.Tensor, - kappa_u_est: torch.Tensor, - kappa_v_est: torch.Tensor, - target_u: torch.Tensor, - target_v: torch.Tensor, - ): - # compute $\sigma_i^2$ - sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound - # compute \|r_i\|^2 - # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - r_sqnorm2 = kappa_u_est**2 + kappa_v_est**2 - delta_u = u - target_u - delta_v = v - target_v - # compute \|delta_i\|^2 - # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. 
- delta_sqnorm = delta_u**2 + delta_v**2 - delta_u_r_u = delta_u * kappa_u_est - delta_v_r_v = delta_v * kappa_v_est - # compute the scalar product - delta_r = delta_u_r_u + delta_v_r_v - # compute squared scalar product ^2 - # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. - delta_r_sqnorm = delta_r**2 - denom2 = sigma2 * (sigma2 + r_sqnorm2) - loss = 0.5 * ( - self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2 - ) - return loss.sum() diff --git a/spaces/cahya/indonesian-story/app/SessionState.py b/spaces/cahya/indonesian-story/app/SessionState.py deleted file mode 100644 index 594c8dcbd209dae45555a7888af39be2c79fd9dc..0000000000000000000000000000000000000000 --- a/spaces/cahya/indonesian-story/app/SessionState.py +++ /dev/null @@ -1,107 +0,0 @@ -"""Hack to add per-session state to Streamlit. -Usage ------ ->>> import SessionState ->>> ->>> session_state = SessionState.get(user_name='', favorite_color='black') ->>> session_state.user_name -'' ->>> session_state.user_name = 'Mary' ->>> session_state.favorite_color -'black' -Since you set user_name above, next time your script runs this will be the -result: ->>> session_state = get(user_name='', favorite_color='black') ->>> session_state.user_name -'Mary' -""" -try: - import streamlit.ReportThread as ReportThread - from streamlit.server.Server import Server -except Exception: - # Streamlit >= 0.65.0 - import streamlit.report_thread as ReportThread - from streamlit.server.server import Server - - -class SessionState(object): - def __init__(self, **kwargs): - """A new SessionState object. - Parameters - ---------- - **kwargs : any - Default values for the session state. - Example - ------- - >>> session_state = SessionState(user_name='', favorite_color='black') - >>> session_state.user_name = 'Mary' - '' - >>> session_state.favorite_color - 'black' - """ - for key, val in kwargs.items(): - setattr(self, key, val) - - -def get(**kwargs): - """Gets a SessionState object for the current session. - Creates a new object if necessary. - Parameters - ---------- - **kwargs : any - Default values you want to add to the session state, if we're creating a - new one. - Example - ------- - >>> session_state = get(user_name='', favorite_color='black') - >>> session_state.user_name - '' - >>> session_state.user_name = 'Mary' - >>> session_state.favorite_color - 'black' - Since you set user_name above, next time your script runs this will be the - result: - >>> session_state = get(user_name='', favorite_color='black') - >>> session_state.user_name - 'Mary' - """ - # Hack to get the session object from Streamlit. - - ctx = ReportThread.get_report_ctx() - - this_session = None - - current_server = Server.get_current() - if hasattr(current_server, '_session_infos'): - # Streamlit < 0.56 - session_infos = Server.get_current()._session_infos.values() - else: - session_infos = Server.get_current()._session_info_by_id.values() - - for session_info in session_infos: - s = session_info.session - if ( - # Streamlit < 0.54.0 - (hasattr(s, '_main_dg') and s._main_dg == ctx.main_dg) - or - # Streamlit >= 0.54.0 - (not hasattr(s, '_main_dg') and s.enqueue == ctx.enqueue) - or - # Streamlit >= 0.65.2 - (not hasattr(s, '_main_dg') and s._uploaded_file_mgr == ctx.uploaded_file_mgr) - ): - this_session = s - - if this_session is None: - raise RuntimeError( - "Oh noes. Couldn't get your Streamlit Session object. " - 'Are you doing something fancy with threads?') - - # Got the session object! 
Now let's attach some state into it. - - if not hasattr(this_session, '_custom_session_state'): - this_session._custom_session_state = SessionState(**kwargs) - - return this_session._custom_session_state - -__all__ = ['get'] \ No newline at end of file diff --git a/spaces/ceshine/t5-paraphrasing/README.md b/spaces/ceshine/t5-paraphrasing/README.md deleted file mode 100644 index 0b6f56c6e8378551d2be3cb52a690490dbe483c5..0000000000000000000000000000000000000000 --- a/spaces/ceshine/t5-paraphrasing/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: T5 Paraphrasing -emoji: 🦀 -colorFrom: indigo -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/chansung/LLM-As-Chatbot/chats/__init__.py b/spaces/chansung/LLM-As-Chatbot/chats/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/chansung/LLM-As-Chatbot/chats/alpaca.py b/spaces/chansung/LLM-As-Chatbot/chats/alpaca.py deleted file mode 100644 index bcbf8c175a4032ea5f43fd017bf25e528b69ce99..0000000000000000000000000000000000000000 --- a/spaces/chansung/LLM-As-Chatbot/chats/alpaca.py +++ /dev/null @@ -1,108 +0,0 @@ -import copy -import json -import global_vars -from chats import pre, post -from pingpong import PingPong -from gens.batch_gen import get_output_batch - -from pingpong.context import CtxLastWindowStrategy - -def build_prompts(ppmanager, user_message, global_context, win_size=3): - dummy_ppm = copy.deepcopy(ppmanager) - - dummy_ppm.ctx = global_context - for pingpong in dummy_ppm.pingpongs: - pong = pingpong.pong - first_sentence = pong.split("\n")[0] - if first_sentence != "" and \ - pre.contains_image_markdown(first_sentence): - pong = ' '.join(pong.split("\n")[1:]).strip() - pingpong.pong = pong - - lws = CtxLastWindowStrategy(win_size) - - prompt = lws(dummy_ppm) - return prompt - -def text_stream(ppmanager, streamer, model_thumbnail_tiny, model_type): - count = 0 - - for new_text in streamer: - if count == 0: - ppmanager.append_pong(f"![]({model_thumbnail_tiny})***[{model_type}]***\n") - count = count + 1 - - ppmanager.append_pong(new_text) - yield ppmanager, ppmanager.build_uis() - - yield ppmanager, ppmanager.build_uis() - -def summarize( - ppmanager, prompt_to_summarize, win_size, - temperature, top_p, top_k, repetition_penalty, max_new_tokens, - num_beams, use_cache, do_sample, eos_token_id, pad_token_id -): - ctx = ppmanager.ctx - last_pong = ppmanager.pingpongs[-1].pong - ppmanager.add_pingpong(PingPong(prompt_to_summarize, "")) - prompt = ppmanager.build_prompts(from_idx=-win_size) - - _, gen_config_summarization = pre.build_gen_config( - temperature, top_p, top_k, repetition_penalty, max_new_tokens, - num_beams, use_cache, do_sample, eos_token_id, pad_token_id - ) - summarize_output = get_output_batch( - global_vars.model, global_vars.tokenizer, [prompt], gen_config_summarization - )[0].split("### Response:")[-1].strip() - ppmanager.ctx = summarize_output - ppmanager.pop_pingpong() - return ppmanager - -def chat_stream( - idx, local_data, user_message, state, model_num, - global_context, ctx_num_lconv, ctx_sum_prompt, - res_temp, res_topp, res_topk, res_rpen, res_mnts, res_beams, res_cache, res_sample, res_eosid, res_padid, -): - res = [ - state["ppmanager_type"].from_json(json.dumps(ppm)) - for ppm in local_data - ] - - ppm = res[idx] - - # add_ping returns a prompt structured in Alpaca form - ppm.add_pingpong( - PingPong(user_message, "") - ) - prompt = build_prompts(ppm, user_message, global_context, ctx_num_lconv) - - # prepare text generating streamer & start generating - gen_kwargs, streamer = pre.build( - prompt, model_num, - res_temp, res_topp, res_topk, res_rpen, res_mnts, - res_beams, res_cache, res_sample, res_eosid, res_padid, - return_token_type_ids=False - ) - pre.start_gen(gen_kwargs, model_num) - - model_thumbnail_tiny = global_vars.models[model_num]["model_thumb_tiny"] - model_type = global_vars.models[model_num]["model_type"] - for ppmanager, uis in text_stream(ppm, streamer, model_thumbnail_tiny, model_type): - yield "", uis, prompt, str(res) - - ppm = post.strip_pong(ppm) - yield "", ppm.build_uis(), prompt, str(res) - - # 
summarization - # ppm.add_pingpong( - # PingPong(None, "![](https://i.postimg.cc/ZKNKDPBd/Vanilla-1s-209px.gif)") - # ) - # yield "", ppm.build_uis(), prompt, state - # ppm.pop_pingpong() - - # ppm = summarize( - # ppm, ctx_sum_prompt, ctx_num_lconv, - # sum_temp, sum_topp, sum_topk, sum_rpen, sum_mnts, - # sum_beams, sum_cache, sum_sample, sum_eosid, sum_padid - # ) - yield "", ppm.build_uis(), prompt, str(res) \ No newline at end of file diff --git a/spaces/cheetah003/HMMC_t2v_search/modules/__init__.py b/spaces/cheetah003/HMMC_t2v_search/modules/__init__.py deleted file mode 100644 index 661b40d6f6b2c7b9ff06c0160b09aaaaa3b8354a..0000000000000000000000000000000000000000 --- a/spaces/cheetah003/HMMC_t2v_search/modules/__init__.py +++ /dev/null @@ -1 +0,0 @@ -#encoding:utf-8 \ No newline at end of file diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/python/README.md b/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/python/README.md deleted file mode 100644 index bbaf5aca44e86523c428735745848d2839351552..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/python/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# YOLOX-OpenVINO in Python - -This tutorial includes a Python demo for OpenVINO, as well as some converted models. - -### Download OpenVINO models. - -| Model | Parameters | GFLOPs | Test Size | mAP | Weights | -|:------| :----: | :----: | :---: | :---: | :---: | -| [YOLOX-Nano](../../../exps/default/nano.py) | 0.91M | 1.08 | 416x416 | 25.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_nano_openvino.tar.gz) | -| [YOLOX-Tiny](../../../exps/default/yolox_tiny.py) | 5.06M | 6.45 | 416x416 |32.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_tiny_openvino.tar.gz) | -| [YOLOX-S](../../../exps/default/yolox_s.py) | 9.0M | 26.8 | 640x640 |40.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s_openvino.tar.gz) | -| [YOLOX-M](../../../exps/default/yolox_m.py) | 25.3M | 73.8 | 640x640 |47.2 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_m_openvino.tar.gz) | -| [YOLOX-L](../../../exps/default/yolox_l.py) | 54.2M | 155.6 | 640x640 |50.1 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_l_openvino.tar.gz) | -| [YOLOX-Darknet53](../../../exps/default/yolov3.py) | 63.72M | 185.3 | 640x640 |48.0 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_dark_openvino.tar.gz) | -| [YOLOX-X](../../../exps/default/yolox_x.py) | 99.1M | 281.9 | 640x640 |51.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x_openvino.tar.gz) | - -## Install OpenVINO Toolkit - -Please visit [Openvino Homepage](https://docs.openvinotoolkit.org/latest/get_started_guides.html) for more details. - -## Set up the Environment - -### For Linux - -**Option1. Set up the environment tempororally. You need to run this command everytime you start a new shell window.** - -```shell -source /opt/intel/openvino_2021/bin/setupvars.sh -``` - -**Option2. Set up the environment permenantly.** - -*Step1.* For Linux: -```shell -vim ~/.bashrc -``` - -*Step2.* Add the following line into your file: - -```shell -source /opt/intel/openvino_2021/bin/setupvars.sh -``` - -*Step3.* Save and exit the file, then run: - -```shell -source ~/.bashrc -``` - - -## Convert model - -1. 
Export ONNX model - - Please refer to the [ONNX tutorial](https://github.com/Megvii-BaseDetection/YOLOX/demo/ONNXRuntime). **Note that you should set --opset to 10, otherwise your next step will fail.** - -2. Convert ONNX to OpenVINO - - ``` shell - cd /openvino_2021/deployment_tools/model_optimizer - ``` - - Install requirements for convert tool - - ```shell - sudo ./install_prerequisites/install_prerequisites_onnx.sh - ``` - - Then convert model. - ```shell - python3 mo.py --input_model --input_shape [--data_type FP16] - ``` - For example: - ```shell - python3 mo.py --input_model yolox.onnx --input_shape [1,3,640,640] --data_type FP16 --output_dir converted_output - ``` - -## Demo - -### python - -```shell -python openvino_inference.py -m -i -``` -or -```shell -python openvino_inference.py -m -i -o -s -d -``` - diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py deleted file mode 100644 index 27cb89f735e2a1883b2b52ee42fd9ba34c5805fb..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py +++ /dev/null @@ -1,399 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# macOS icns file decoder, based on icns.py by Bob Ippolito. -# -# history: -# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies. -# 2020-04-04 Allow saving on all operating systems. -# -# Copyright (c) 2004 by Bob Ippolito. -# Copyright (c) 2004 by Secret Labs. -# Copyright (c) 2004 by Fredrik Lundh. -# Copyright (c) 2014 by Alastair Houghton. -# Copyright (c) 2020 by Pan Jing. -# -# See the README file for information on usage and redistribution. -# - -import io -import os -import struct -import sys - -from . import Image, ImageFile, PngImagePlugin, features - -enable_jpeg2k = features.check_codec("jpg_2000") -if enable_jpeg2k: - from . import Jpeg2KImagePlugin - -MAGIC = b"icns" -HEADERSIZE = 8 - - -def nextheader(fobj): - return struct.unpack(">4sI", fobj.read(HEADERSIZE)) - - -def read_32t(fobj, start_length, size): - # The 128x128 icon seems to have an extra header for some reason. - (start, length) = start_length - fobj.seek(start) - sig = fobj.read(4) - if sig != b"\x00\x00\x00\x00": - msg = "Unknown signature, expecting 0x00000000" - raise SyntaxError(msg) - return read_32(fobj, (start + 4, length - 4), size) - - -def read_32(fobj, start_length, size): - """ - Read a 32bit RGB icon resource. Seems to be either uncompressed or - an RLE packbits-like scheme. 
- """ - (start, length) = start_length - fobj.seek(start) - pixel_size = (size[0] * size[2], size[1] * size[2]) - sizesq = pixel_size[0] * pixel_size[1] - if length == sizesq * 3: - # uncompressed ("RGBRGBGB") - indata = fobj.read(length) - im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1) - else: - # decode image - im = Image.new("RGB", pixel_size, None) - for band_ix in range(3): - data = [] - bytesleft = sizesq - while bytesleft > 0: - byte = fobj.read(1) - if not byte: - break - byte = byte[0] - if byte & 0x80: - blocksize = byte - 125 - byte = fobj.read(1) - for i in range(blocksize): - data.append(byte) - else: - blocksize = byte + 1 - data.append(fobj.read(blocksize)) - bytesleft -= blocksize - if bytesleft <= 0: - break - if bytesleft != 0: - msg = f"Error reading channel [{repr(bytesleft)} left]" - raise SyntaxError(msg) - band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1) - im.im.putband(band.im, band_ix) - return {"RGB": im} - - -def read_mk(fobj, start_length, size): - # Alpha masks seem to be uncompressed - start = start_length[0] - fobj.seek(start) - pixel_size = (size[0] * size[2], size[1] * size[2]) - sizesq = pixel_size[0] * pixel_size[1] - band = Image.frombuffer("L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1) - return {"A": band} - - -def read_png_or_jpeg2000(fobj, start_length, size): - (start, length) = start_length - fobj.seek(start) - sig = fobj.read(12) - if sig[:8] == b"\x89PNG\x0d\x0a\x1a\x0a": - fobj.seek(start) - im = PngImagePlugin.PngImageFile(fobj) - Image._decompression_bomb_check(im.size) - return {"RGBA": im} - elif ( - sig[:4] == b"\xff\x4f\xff\x51" - or sig[:4] == b"\x0d\x0a\x87\x0a" - or sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a" - ): - if not enable_jpeg2k: - msg = ( - "Unsupported icon subimage format (rebuild PIL " - "with JPEG 2000 support to fix this)" - ) - raise ValueError(msg) - # j2k, jpc or j2c - fobj.seek(start) - jp2kstream = fobj.read(length) - f = io.BytesIO(jp2kstream) - im = Jpeg2KImagePlugin.Jpeg2KImageFile(f) - Image._decompression_bomb_check(im.size) - if im.mode != "RGBA": - im = im.convert("RGBA") - return {"RGBA": im} - else: - msg = "Unsupported icon subimage format" - raise ValueError(msg) - - -class IcnsFile: - SIZES = { - (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)], - (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)], - (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)], - (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)], - (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)], - (128, 128, 1): [ - (b"ic07", read_png_or_jpeg2000), - (b"it32", read_32t), - (b"t8mk", read_mk), - ], - (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)], - (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)], - (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)], - (32, 32, 1): [ - (b"icp5", read_png_or_jpeg2000), - (b"il32", read_32), - (b"l8mk", read_mk), - ], - (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)], - (16, 16, 1): [ - (b"icp4", read_png_or_jpeg2000), - (b"is32", read_32), - (b"s8mk", read_mk), - ], - } - - def __init__(self, fobj): - """ - fobj is a file-like object as an icns resource - """ - # signature : (start, length) - self.dct = dct = {} - self.fobj = fobj - sig, filesize = nextheader(fobj) - if not _accept(sig): - msg = "not an icns file" - raise SyntaxError(msg) - i = HEADERSIZE - while i < filesize: - sig, blocksize = nextheader(fobj) - if blocksize <= 0: - msg = "invalid block header" - raise SyntaxError(msg) - i += HEADERSIZE - blocksize -= HEADERSIZE - dct[sig] = (i, blocksize) - 
fobj.seek(blocksize, io.SEEK_CUR) - i += blocksize - - def itersizes(self): - sizes = [] - for size, fmts in self.SIZES.items(): - for fmt, reader in fmts: - if fmt in self.dct: - sizes.append(size) - break - return sizes - - def bestsize(self): - sizes = self.itersizes() - if not sizes: - msg = "No 32bit icon resources found" - raise SyntaxError(msg) - return max(sizes) - - def dataforsize(self, size): - """ - Get an icon resource as {channel: array}. Note that - the arrays are bottom-up like windows bitmaps and will likely - need to be flipped or transposed in some way. - """ - dct = {} - for code, reader in self.SIZES[size]: - desc = self.dct.get(code) - if desc is not None: - dct.update(reader(self.fobj, desc, size)) - return dct - - def getimage(self, size=None): - if size is None: - size = self.bestsize() - if len(size) == 2: - size = (size[0], size[1], 1) - channels = self.dataforsize(size) - - im = channels.get("RGBA", None) - if im: - return im - - im = channels.get("RGB").copy() - try: - im.putalpha(channels["A"]) - except KeyError: - pass - return im - - -## -# Image plugin for Mac OS icons. - - -class IcnsImageFile(ImageFile.ImageFile): - """ - PIL image support for Mac OS .icns files. - Chooses the best resolution, but will possibly load - a different size image if you mutate the size attribute - before calling 'load'. - - The info dictionary has a key 'sizes' that is a list - of sizes that the icns file has. - """ - - format = "ICNS" - format_description = "Mac OS icns resource" - - def _open(self): - self.icns = IcnsFile(self.fp) - self.mode = "RGBA" - self.info["sizes"] = self.icns.itersizes() - self.best_size = self.icns.bestsize() - self.size = ( - self.best_size[0] * self.best_size[2], - self.best_size[1] * self.best_size[2], - ) - - @property - def size(self): - return self._size - - @size.setter - def size(self, value): - info_size = value - if info_size not in self.info["sizes"] and len(info_size) == 2: - info_size = (info_size[0], info_size[1], 1) - if ( - info_size not in self.info["sizes"] - and len(info_size) == 3 - and info_size[2] == 1 - ): - simple_sizes = [ - (size[0] * size[2], size[1] * size[2]) for size in self.info["sizes"] - ] - if value in simple_sizes: - info_size = self.info["sizes"][simple_sizes.index(value)] - if info_size not in self.info["sizes"]: - msg = "This is not one of the allowed sizes of this image" - raise ValueError(msg) - self._size = value - - def load(self): - if len(self.size) == 3: - self.best_size = self.size - self.size = ( - self.best_size[0] * self.best_size[2], - self.best_size[1] * self.best_size[2], - ) - - px = Image.Image.load(self) - if self.im is not None and self.im.size == self.size: - # Already loaded - return px - self.load_prepare() - # This is likely NOT the best way to do it, but whatever. - im = self.icns.getimage(self.best_size) - - # If this is a PNG or JPEG 2000, it won't be loaded yet - px = im.load() - - self.im = im.im - self.mode = im.mode - self.size = im.size - - return px - - -def _save(im, fp, filename): - """ - Saves the image as a series of PNG files, - that are then combined into a .icns file. 
- """ - if hasattr(fp, "flush"): - fp.flush() - - sizes = { - b"ic07": 128, - b"ic08": 256, - b"ic09": 512, - b"ic10": 1024, - b"ic11": 32, - b"ic12": 64, - b"ic13": 256, - b"ic14": 512, - } - provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])} - size_streams = {} - for size in set(sizes.values()): - image = ( - provided_images[size] - if size in provided_images - else im.resize((size, size)) - ) - - temp = io.BytesIO() - image.save(temp, "png") - size_streams[size] = temp.getvalue() - - entries = [] - for type, size in sizes.items(): - stream = size_streams[size] - entries.append( - {"type": type, "size": HEADERSIZE + len(stream), "stream": stream} - ) - - # Header - fp.write(MAGIC) - file_length = HEADERSIZE # Header - file_length += HEADERSIZE + 8 * len(entries) # TOC - file_length += sum(entry["size"] for entry in entries) - fp.write(struct.pack(">i", file_length)) - - # TOC - fp.write(b"TOC ") - fp.write(struct.pack(">i", HEADERSIZE + len(entries) * HEADERSIZE)) - for entry in entries: - fp.write(entry["type"]) - fp.write(struct.pack(">i", entry["size"])) - - # Data - for entry in entries: - fp.write(entry["type"]) - fp.write(struct.pack(">i", entry["size"])) - fp.write(entry["stream"]) - - if hasattr(fp, "flush"): - fp.flush() - - -def _accept(prefix): - return prefix[:4] == MAGIC - - -Image.register_open(IcnsImageFile.format, IcnsImageFile, _accept) -Image.register_extension(IcnsImageFile.format, ".icns") - -Image.register_save(IcnsImageFile.format, _save) -Image.register_mime(IcnsImageFile.format, "image/icns") - -if __name__ == "__main__": - if len(sys.argv) < 2: - print("Syntax: python3 IcnsImagePlugin.py [file]") - sys.exit() - - with open(sys.argv[1], "rb") as fp: - imf = IcnsImageFile(fp) - for size in imf.info["sizes"]: - imf.size = size - imf.save("out-%s-%s-%s.png" % size) - with Image.open(sys.argv[1]) as im: - im.save("out.png") - if sys.platform == "windows": - os.startfile("out.png") diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcoImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcoImagePlugin.py deleted file mode 100644 index a188f8fdcea46e5cb9423a3c4572d88d93890fc6..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcoImagePlugin.py +++ /dev/null @@ -1,358 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# Windows Icon support for PIL -# -# History: -# 96-05-27 fl Created -# -# Copyright (c) Secret Labs AB 1997. -# Copyright (c) Fredrik Lundh 1996. -# -# See the README file for information on usage and redistribution. -# - -# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis -# . -# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki -# -# Icon format references: -# * https://en.wikipedia.org/wiki/ICO_(file_format) -# * https://msdn.microsoft.com/en-us/library/ms997538.aspx - - -import warnings -from io import BytesIO -from math import ceil, log - -from . 
import BmpImagePlugin, Image, ImageFile, PngImagePlugin -from ._binary import i16le as i16 -from ._binary import i32le as i32 -from ._binary import o8 -from ._binary import o16le as o16 -from ._binary import o32le as o32 - -# -# -------------------------------------------------------------------- - -_MAGIC = b"\0\0\1\0" - - -def _save(im, fp, filename): - fp.write(_MAGIC) # (2+2) - bmp = im.encoderinfo.get("bitmap_format") == "bmp" - sizes = im.encoderinfo.get( - "sizes", - [(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)], - ) - frames = [] - provided_ims = [im] + im.encoderinfo.get("append_images", []) - width, height = im.size - for size in sorted(set(sizes)): - if size[0] > width or size[1] > height or size[0] > 256 or size[1] > 256: - continue - - for provided_im in provided_ims: - if provided_im.size != size: - continue - frames.append(provided_im) - if bmp: - bits = BmpImagePlugin.SAVE[provided_im.mode][1] - bits_used = [bits] - for other_im in provided_ims: - if other_im.size != size: - continue - bits = BmpImagePlugin.SAVE[other_im.mode][1] - if bits not in bits_used: - # Another image has been supplied for this size - # with a different bit depth - frames.append(other_im) - bits_used.append(bits) - break - else: - # TODO: invent a more convenient method for proportional scalings - frame = provided_im.copy() - frame.thumbnail(size, Image.Resampling.LANCZOS, reducing_gap=None) - frames.append(frame) - fp.write(o16(len(frames))) # idCount(2) - offset = fp.tell() + len(frames) * 16 - for frame in frames: - width, height = frame.size - # 0 means 256 - fp.write(o8(width if width < 256 else 0)) # bWidth(1) - fp.write(o8(height if height < 256 else 0)) # bHeight(1) - - bits, colors = BmpImagePlugin.SAVE[frame.mode][1:] if bmp else (32, 0) - fp.write(o8(colors)) # bColorCount(1) - fp.write(b"\0") # bReserved(1) - fp.write(b"\0\0") # wPlanes(2) - fp.write(o16(bits)) # wBitCount(2) - - image_io = BytesIO() - if bmp: - frame.save(image_io, "dib") - - if bits != 32: - and_mask = Image.new("1", size) - ImageFile._save( - and_mask, image_io, [("raw", (0, 0) + size, 0, ("1", 0, -1))] - ) - else: - frame.save(image_io, "png") - image_io.seek(0) - image_bytes = image_io.read() - if bmp: - image_bytes = image_bytes[:8] + o32(height * 2) + image_bytes[12:] - bytes_len = len(image_bytes) - fp.write(o32(bytes_len)) # dwBytesInRes(4) - fp.write(o32(offset)) # dwImageOffset(4) - current = fp.tell() - fp.seek(offset) - fp.write(image_bytes) - offset = offset + bytes_len - fp.seek(current) - - -def _accept(prefix): - return prefix[:4] == _MAGIC - - -class IcoFile: - def __init__(self, buf): - """ - Parse image from file-like object containing ico file data - """ - - # check magic - s = buf.read(6) - if not _accept(s): - msg = "not an ICO file" - raise SyntaxError(msg) - - self.buf = buf - self.entry = [] - - # Number of items in file - self.nb_items = i16(s, 4) - - # Get headers for each item - for i in range(self.nb_items): - s = buf.read(16) - - icon_header = { - "width": s[0], - "height": s[1], - "nb_color": s[2], # No. of colors in image (0 if >=8bpp) - "reserved": s[3], - "planes": i16(s, 4), - "bpp": i16(s, 6), - "size": i32(s, 8), - "offset": i32(s, 12), - } - - # See Wikipedia - for j in ("width", "height"): - if not icon_header[j]: - icon_header[j] = 256 - - # See Wikipedia notes about color depth. 
- # We need this just to differ images with equal sizes - icon_header["color_depth"] = ( - icon_header["bpp"] - or ( - icon_header["nb_color"] != 0 - and ceil(log(icon_header["nb_color"], 2)) - ) - or 256 - ) - - icon_header["dim"] = (icon_header["width"], icon_header["height"]) - icon_header["square"] = icon_header["width"] * icon_header["height"] - - self.entry.append(icon_header) - - self.entry = sorted(self.entry, key=lambda x: x["color_depth"]) - # ICO images are usually squares - # self.entry = sorted(self.entry, key=lambda x: x['width']) - self.entry = sorted(self.entry, key=lambda x: x["square"]) - self.entry.reverse() - - def sizes(self): - """ - Get a list of all available icon sizes and color depths. - """ - return {(h["width"], h["height"]) for h in self.entry} - - def getentryindex(self, size, bpp=False): - for i, h in enumerate(self.entry): - if size == h["dim"] and (bpp is False or bpp == h["color_depth"]): - return i - return 0 - - def getimage(self, size, bpp=False): - """ - Get an image from the icon - """ - return self.frame(self.getentryindex(size, bpp)) - - def frame(self, idx): - """ - Get an image from frame idx - """ - - header = self.entry[idx] - - self.buf.seek(header["offset"]) - data = self.buf.read(8) - self.buf.seek(header["offset"]) - - if data[:8] == PngImagePlugin._MAGIC: - # png frame - im = PngImagePlugin.PngImageFile(self.buf) - Image._decompression_bomb_check(im.size) - else: - # XOR + AND mask bmp frame - im = BmpImagePlugin.DibImageFile(self.buf) - Image._decompression_bomb_check(im.size) - - # change tile dimension to only encompass XOR image - im._size = (im.size[0], int(im.size[1] / 2)) - d, e, o, a = im.tile[0] - im.tile[0] = d, (0, 0) + im.size, o, a - - # figure out where AND mask image starts - bpp = header["bpp"] - if 32 == bpp: - # 32-bit color depth icon image allows semitransparent areas - # PIL's DIB format ignores transparency bits, recover them. - # The DIB is packed in BGRX byte order where X is the alpha - # channel. - - # Back up to start of bmp data - self.buf.seek(o) - # extract every 4th byte (eg. 3,7,11,15,...) - alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4] - - # convert to an 8bpp grayscale image - mask = Image.frombuffer( - "L", # 8bpp - im.size, # (w, h) - alpha_bytes, # source chars - "raw", # raw decoder - ("L", 0, -1), # 8bpp inverted, unpadded, reversed - ) - else: - # get AND image from end of bitmap - w = im.size[0] - if (w % 32) > 0: - # bitmap row data is aligned to word boundaries - w += 32 - (im.size[0] % 32) - - # the total mask data is - # padded row size * height / bits per char - - total_bytes = int((w * im.size[1]) / 8) - and_mask_offset = header["offset"] + header["size"] - total_bytes - - self.buf.seek(and_mask_offset) - mask_data = self.buf.read(total_bytes) - - # convert raw data to image - mask = Image.frombuffer( - "1", # 1 bpp - im.size, # (w, h) - mask_data, # source chars - "raw", # raw decoder - ("1;I", int(w / 8), -1), # 1bpp inverted, padded, reversed - ) - - # now we have two images, im is XOR image and mask is AND image - - # apply mask image as alpha channel - im = im.convert("RGBA") - im.putalpha(mask) - - return im - - -## -# Image plugin for Windows Icon files. - - -class IcoImageFile(ImageFile.ImageFile): - """ - PIL read-only image support for Microsoft Windows .ico files. - - By default the largest resolution image in the file will be loaded. This - can be changed by altering the 'size' attribute before calling 'load'. 
- - The info dictionary has a key 'sizes' that is a list of the sizes available - in the icon file. - - Handles classic, XP and Vista icon formats. - - When saving, PNG compression is used. Support for this was only added in - Windows Vista. If you are unable to view the icon in Windows, convert the - image to "RGBA" mode before saving. - - This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis - . - https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki - """ - - format = "ICO" - format_description = "Windows Icon" - - def _open(self): - self.ico = IcoFile(self.fp) - self.info["sizes"] = self.ico.sizes() - self.size = self.ico.entry[0]["dim"] - self.load() - - @property - def size(self): - return self._size - - @size.setter - def size(self, value): - if value not in self.info["sizes"]: - msg = "This is not one of the allowed sizes of this image" - raise ValueError(msg) - self._size = value - - def load(self): - if self.im is not None and self.im.size == self.size: - # Already loaded - return Image.Image.load(self) - im = self.ico.getimage(self.size) - # if tile is PNG, it won't really be loaded yet - im.load() - self.im = im.im - self.pyaccess = None - self.mode = im.mode - if im.size != self.size: - warnings.warn("Image was not the expected size") - - index = self.ico.getentryindex(self.size) - sizes = list(self.info["sizes"]) - sizes[index] = im.size - self.info["sizes"] = set(sizes) - - self.size = im.size - - def load_seek(self): - # Flag the ImageFile.Parser so that it - # just does all the decode at the end. - pass - - -# -# -------------------------------------------------------------------- - - -Image.register_open(IcoImageFile.format, IcoImageFile, _accept) -Image.register_save(IcoImageFile.format, _save) -Image.register_extension(IcoImageFile.format, ".ico") - -Image.register_mime(IcoImageFile.format, "image/x-icon") diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/MpoImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/MpoImagePlugin.py deleted file mode 100644 index f9261c77d6862d7def90c6136dff6449241b0690..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/MpoImagePlugin.py +++ /dev/null @@ -1,197 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# MPO file handling -# -# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the -# Camera & Imaging Products Association) -# -# The multi-picture object combines multiple JPEG images (with a modified EXIF -# data format) into a single file. While it can theoretically be used much like -# a GIF animation, it is commonly used to represent 3D photographs and is (as -# of this writing) the most commonly used format by 3D cameras. -# -# History: -# 2014-03-13 Feneric Created -# -# See the README file for information on usage and redistribution. -# - -import itertools -import os -import struct - -from . 
import ( - ExifTags, - Image, - ImageFile, - ImageSequence, - JpegImagePlugin, - TiffImagePlugin, -) -from ._binary import i16be as i16 -from ._binary import o32le - -# def _accept(prefix): -# return JpegImagePlugin._accept(prefix) - - -def _save(im, fp, filename): - JpegImagePlugin._save(im, fp, filename) - - -def _save_all(im, fp, filename): - append_images = im.encoderinfo.get("append_images", []) - if not append_images: - try: - animated = im.is_animated - except AttributeError: - animated = False - if not animated: - _save(im, fp, filename) - return - - mpf_offset = 28 - offsets = [] - for imSequence in itertools.chain([im], append_images): - for im_frame in ImageSequence.Iterator(imSequence): - if not offsets: - # APP2 marker - im_frame.encoderinfo["extra"] = ( - b"\xFF\xE2" + struct.pack(">H", 6 + 82) + b"MPF\0" + b" " * 82 - ) - exif = im_frame.encoderinfo.get("exif") - if isinstance(exif, Image.Exif): - exif = exif.tobytes() - im_frame.encoderinfo["exif"] = exif - if exif: - mpf_offset += 4 + len(exif) - - JpegImagePlugin._save(im_frame, fp, filename) - offsets.append(fp.tell()) - else: - im_frame.save(fp, "JPEG") - offsets.append(fp.tell() - offsets[-1]) - - ifd = TiffImagePlugin.ImageFileDirectory_v2() - ifd[0xB000] = b"0100" - ifd[0xB001] = len(offsets) - - mpentries = b"" - data_offset = 0 - for i, size in enumerate(offsets): - if i == 0: - mptype = 0x030000 # Baseline MP Primary Image - else: - mptype = 0x000000 # Undefined - mpentries += struct.pack(" 1 - self._fp = self.fp # FIXME: hack - self._fp.seek(self.__mpoffsets[0]) # get ready to read first frame - self.__frame = 0 - self.offset = 0 - # for now we can only handle reading and individual frame extraction - self.readonly = 1 - - def load_seek(self, pos): - self._fp.seek(pos) - - def seek(self, frame): - if not self._seek_check(frame): - return - self.fp = self._fp - self.offset = self.__mpoffsets[frame] - - self.fp.seek(self.offset + 2) # skip SOI marker - segment = self.fp.read(2) - if not segment: - msg = "No data found for frame" - raise ValueError(msg) - self._size = self._initial_size - if i16(segment) == 0xFFE1: # APP1 - n = i16(self.fp.read(2)) - 2 - self.info["exif"] = ImageFile._safe_read(self.fp, n) - self._reload_exif() - - mptype = self.mpinfo[0xB002][frame]["Attribute"]["MPType"] - if mptype.startswith("Large Thumbnail"): - exif = self.getexif().get_ifd(ExifTags.IFD.Exif) - if 40962 in exif and 40963 in exif: - self._size = (exif[40962], exif[40963]) - elif "exif" in self.info: - del self.info["exif"] - self._reload_exif() - - self.tile = [("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))] - self.__frame = frame - - def tell(self): - return self.__frame - - @staticmethod - def adopt(jpeg_instance, mpheader=None): - """ - Transform the instance of JpegImageFile into - an instance of MpoImageFile. - After the call, the JpegImageFile is extended - to be an MpoImageFile. - - This is essentially useful when opening a JPEG - file that reveals itself as an MPO, to avoid - double call to _open. - """ - jpeg_instance.__class__ = MpoImageFile - jpeg_instance._after_jpeg_open(mpheader) - return jpeg_instance - - -# --------------------------------------------------------------------- -# Registry stuff - -# Note that since MPO shares a factory with JPEG, we do not need to do a -# separate registration for it here. 
-# Image.register_open(MpoImageFile.format, -# JpegImagePlugin.jpeg_factory, _accept) -Image.register_save(MpoImageFile.format, _save) -Image.register_save_all(MpoImageFile.format, _save_all) - -Image.register_extension(MpoImageFile.format, ".mpo") - -Image.register_mime(MpoImageFile.format, "image/mpo") diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/settings.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/settings.py deleted file mode 100644 index fd319ad707f3f5e8313bd6e94ed7f17f811f2787..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/settings.py +++ /dev/null @@ -1,64 +0,0 @@ -# encoding: utf-8 - -"""Custom element classes related to document settings""" - -from __future__ import absolute_import, division, print_function, unicode_literals - -from docx.oxml.xmlchemy import BaseOxmlElement, ZeroOrOne - - -class CT_Settings(BaseOxmlElement): - """`w:settings` element, root element for the settings part""" - - _tag_seq = ( - "w:writeProtection", "w:view", "w:zoom", "w:removePersonalInformation", - "w:removeDateAndTime", "w:doNotDisplayPageBoundaries", - "w:displayBackgroundShape", "w:printPostScriptOverText", - "w:printFractionalCharacterWidth", "w:printFormsData", "w:embedTrueTypeFonts", - "w:embedSystemFonts", "w:saveSubsetFonts", "w:saveFormsData", "w:mirrorMargins", - "w:alignBordersAndEdges", "w:bordersDoNotSurroundHeader", - "w:bordersDoNotSurroundFooter", "w:gutterAtTop", "w:hideSpellingErrors", - "w:hideGrammaticalErrors", "w:activeWritingStyle", "w:proofState", - "w:formsDesign", "w:attachedTemplate", "w:linkStyles", - "w:stylePaneFormatFilter", "w:stylePaneSortMethod", "w:documentType", - "w:mailMerge", "w:revisionView", "w:trackRevisions", "w:doNotTrackMoves", - "w:doNotTrackFormatting", "w:documentProtection", "w:autoFormatOverride", - "w:styleLockTheme", "w:styleLockQFSet", "w:defaultTabStop", "w:autoHyphenation", - "w:consecutiveHyphenLimit", "w:hyphenationZone", "w:doNotHyphenateCaps", - "w:showEnvelope", "w:summaryLength", "w:clickAndTypeStyle", - "w:defaultTableStyle", "w:evenAndOddHeaders", "w:bookFoldRevPrinting", - "w:bookFoldPrinting", "w:bookFoldPrintingSheets", - "w:drawingGridHorizontalSpacing", "w:drawingGridVerticalSpacing", - "w:displayHorizontalDrawingGridEvery", "w:displayVerticalDrawingGridEvery", - "w:doNotUseMarginsForDrawingGridOrigin", "w:drawingGridHorizontalOrigin", - "w:drawingGridVerticalOrigin", "w:doNotShadeFormData", "w:noPunctuationKerning", - "w:characterSpacingControl", "w:printTwoOnOne", "w:strictFirstAndLastChars", - "w:noLineBreaksAfter", "w:noLineBreaksBefore", "w:savePreviewPicture", - "w:doNotValidateAgainstSchema", "w:saveInvalidXml", "w:ignoreMixedContent", - "w:alwaysShowPlaceholderText", "w:doNotDemarcateInvalidXml", - "w:saveXmlDataOnly", "w:useXSLTWhenSaving", "w:saveThroughXslt", - "w:showXMLTags", "w:alwaysMergeEmptyNamespace", "w:updateFields", - "w:hdrShapeDefaults", "w:footnotePr", "w:endnotePr", "w:compat", "w:docVars", - "w:rsids", "m:mathPr", "w:attachedSchema", "w:themeFontLang", - "w:clrSchemeMapping", "w:doNotIncludeSubdocsInStats", - "w:doNotAutoCompressPictures", "w:forceUpgrade", "w:captions", - "w:readModeInkLockDown", "w:smartTagType", "sl:schemaLibrary", - "w:shapeDefaults", "w:doNotEmbedSmartTags", "w:decimalSymbol", "w:listSeparator" - ) - evenAndOddHeaders = ZeroOrOne("w:evenAndOddHeaders", successors=_tag_seq[48:]) - del _tag_seq - - @property - 
def evenAndOddHeaders_val(self): - """value of `w:evenAndOddHeaders/@w:val` or |None| if not present.""" - evenAndOddHeaders = self.evenAndOddHeaders - if evenAndOddHeaders is None: - return False - return evenAndOddHeaders.val - - @evenAndOddHeaders_val.setter - def evenAndOddHeaders_val(self, value): - if value in [None, False]: - self._remove_evenAndOddHeaders() - else: - self.get_or_add_evenAndOddHeaders().val = value diff --git a/spaces/cihyFjudo/fairness-paper-search/Ap Statistics Investigative Task Chapter 10 Alligators Answerszip A Real Study on Alligator Biology and Ecology.md b/spaces/cihyFjudo/fairness-paper-search/Ap Statistics Investigative Task Chapter 10 Alligators Answerszip A Real Study on Alligator Biology and Ecology.md deleted file mode 100644 index 9d3c28bbe97d53d61e5acd1bec47715ed329a8be..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Ap Statistics Investigative Task Chapter 10 Alligators Answerszip A Real Study on Alligator Biology and Ecology.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Ap Statistics Investigative Task Chapter 10 Alligators Answerszip


    Download Zip 🆓 https://tinurli.com/2uwhNz




    diff --git a/spaces/cihyFjudo/fairness-paper-search/Edius 6.5 Full Tek Link The Benefits of Using this Software for HDR Editing Grading and Delivery.md b/spaces/cihyFjudo/fairness-paper-search/Edius 6.5 Full Tek Link The Benefits of Using this Software for HDR Editing Grading and Delivery.md deleted file mode 100644 index 3d096c9f511d2b9892aaea1b713bcc4bd4106d9a..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Edius 6.5 Full Tek Link The Benefits of Using this Software for HDR Editing Grading and Delivery.md +++ /dev/null @@ -1,5 +0,0 @@ -

    View High Dynamic Range video the way it was meant to be seen. The amazing new MacBook Pro display delivers up to 7.7 million pixels and 1,000 nits of sustained brightness for HDR editing, grading and delivery with Final Cut Pro. Or connect the gorgeous Pro Display XDR to your Mac for a full 6K HDR playback and finishing solution.


    edius 6.5 full tek link


    Download File: https://tinurli.com/2uwjcv



    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/Kanchana 2 Movie Download Tamilrockers 176 Dont Miss the Terrifying and Amusing Film.md b/spaces/cihyFjudo/fairness-paper-search/Kanchana 2 Movie Download Tamilrockers 176 Dont Miss the Terrifying and Amusing Film.md deleted file mode 100644 index 9782ef711da1e09b51c73e89a7805173446d2328..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Kanchana 2 Movie Download Tamilrockers 176 Dont Miss the Terrifying and Amusing Film.md +++ /dev/null @@ -1,6 +0,0 @@ -

    kanchana 2 movie download tamilrockers 176


    DOWNLOAD: https://tinurli.com/2uwiyj




    diff --git a/spaces/cihyFjudo/fairness-paper-search/Mireille Mathieu - Greatest Hits (2008).md b/spaces/cihyFjudo/fairness-paper-search/Mireille Mathieu - Greatest Hits (2008).md deleted file mode 100644 index 11741058160cbdddf72ccbef76732f3ff435cfd1..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Mireille Mathieu - Greatest Hits (2008).md +++ /dev/null @@ -1,6 +0,0 @@ -

    Mireille Mathieu - Greatest Hits (2008)


    Download Zip: https://tinurli.com/2uwkRt




    diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc_pred.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc_pred.h deleted file mode 100644 index aa305f45a5dc38530319f928e30bda6b715fcff3..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc_pred.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * AAC encoder main-type prediction - * Copyright (C) 2015 Rostislav Pehlivanov - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * AAC encoder main-type prediction - * @author Rostislav Pehlivanov ( atomnuker gmail com ) - */ - -#ifndef AVCODEC_AACENC_PRED_H -#define AVCODEC_AACENC_PRED_H - -#include "aacenc.h" - -/* Every predictor group needs to get reset at least once in this many frames */ -#define PRED_RESET_FRAME_MIN 240 - -/* Any frame with less than this amount of frames since last reset is ok */ -#define PRED_RESET_MIN 64 - -/* Raise to filter any low frequency artifacts due to prediction */ -#define PRED_SFB_START 10 - -void ff_aac_apply_main_pred(AACEncContext *s, SingleChannelElement *sce); -void ff_aac_adjust_common_pred(AACEncContext *s, ChannelElement *cpe); -void ff_aac_search_for_pred(AACEncContext *s, SingleChannelElement *sce); -void ff_aac_encode_main_pred(AACEncContext *s, SingleChannelElement *sce); - -#endif /* AVCODEC_AACENC_PRED_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aandcttab.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aandcttab.h deleted file mode 100644 index b0a2f44ecd5c032acedcf6054de49c2e88f73062..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aandcttab.h +++ /dev/null @@ -1,32 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * AAN (Arai, Agui and Nakajima) (I)DCT tables - */ - -#ifndef AVCODEC_AANDCTTAB_H -#define AVCODEC_AANDCTTAB_H - -#include - -extern const uint16_t ff_aanscales[64]; -extern const uint16_t ff_inv_aanscales[64]; - -#endif /* AVCODEC_AANDCTTAB_H */ diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/blockdsp_init_arm.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/blockdsp_init_arm.c deleted file mode 100644 index 2080d5253f9e74cd97240dc510e12ba8c9accbaa..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/blockdsp_init_arm.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * ARM optimized block operations - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "libavutil/attributes.h" -#include "libavutil/cpu.h" -#include "libavutil/arm/cpu.h" -#include "libavcodec/blockdsp.h" -#include "blockdsp_arm.h" - -av_cold void ff_blockdsp_init_arm(BlockDSPContext *c) -{ - int cpu_flags = av_get_cpu_flags(); - - if (have_neon(cpu_flags)) - ff_blockdsp_init_neon(c); -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdata.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdata.h deleted file mode 100644 index 31191a84759eeefb0cfb5861be277e08c6acc62f..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdata.h +++ /dev/null @@ -1,37 +0,0 @@ -/* - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_DVDATA_H -#define AVCODEC_DVDATA_H - -#include - -extern const uint8_t ff_dv_zigzag248_direct[64]; - -extern const uint8_t ff_dv_quant_shifts[22][4]; -extern const uint8_t ff_dv_quant_offset[4]; - -#define NB_DV_VLC 409 -/* The number of entries with value zero in ff_dv_vlc_level. 
*/ -#define NB_DV_ZERO_LEVEL_ENTRIES 72 - -extern const uint8_t ff_dv_vlc_len[NB_DV_VLC]; -extern const uint8_t ff_dv_vlc_run[NB_DV_VLC]; -extern const uint8_t ff_dv_vlc_level[NB_DV_VLC]; - -#endif /* AVCODEC_DVDATA_H */ diff --git a/spaces/coledie/Fashion_VAE/model.py b/spaces/coledie/Fashion_VAE/model.py deleted file mode 100644 index fd9329c4268078b595af4ea978976fda19897ef5..0000000000000000000000000000000000000000 --- a/spaces/coledie/Fashion_VAE/model.py +++ /dev/null @@ -1,137 +0,0 @@ -"""MNIST digit classificatin.""" -import matplotlib.pyplot as plt -import numpy as np -import torch -import torch.nn as nn -import torchvision.datasets -import torch.nn.functional as F -from torchvision import transforms - - -class Encoder(nn.Module): - def __init__(self, image_dim, latent_dim): - super().__init__() - self.image_dim = image_dim - self.latent_dim = latent_dim - self.cnn = nn.Sequential( - nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, padding=2), - nn.MaxPool2d(kernel_size=2), - nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), - nn.MaxPool2d(kernel_size=2), - nn.Flatten(1, -1), - ) - self.l_mu = nn.Linear(1568, np.product(self.latent_dim)) - self.l_sigma = nn.Linear(1568, np.product(self.latent_dim)) - - def forward(self, x): - x = x.reshape((-1, 1, *self.image_dim)) - x = self.cnn(x) - mu = self.l_mu(x) - sigma = self.l_sigma(x) - return mu, sigma - - -class Decoder(nn.Module): - def __init__(self, image_dim, latent_dim): - super().__init__() - self.image_dim = image_dim - self.latent_dim = latent_dim - self.cnn = nn.Sequential( - nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, padding=2), - nn.MaxPool2d(kernel_size=2), - nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2), - nn.MaxPool2d(kernel_size=2), - nn.Flatten(1, -1), - nn.Linear(288, np.product(self.image_dim)), - nn.Sigmoid(), - ) - - def forward(self, c): - c = c.reshape((-1, 1, *self.latent_dim)) - x = self.cnn(c) - return x - - -class VAE(nn.Module): - def __init__(self, image_dim=(28, 28), latent_dim=(14, 14)): - super().__init__() - self.image_dim = image_dim - self.encoder = Encoder(image_dim, latent_dim) - self.decoder = Decoder(image_dim, latent_dim) - - def forward(self, x): - x = x.reshape((-1, 1, *self.image_dim)) - mu, sigma = self.encoder(x) - c = mu + sigma * torch.randn_like(sigma) - xhat = self.decoder(c) - return xhat, mu, sigma - - -if __name__ == '__main__': - N_EPOCHS = 100 - LEARNING_RATE = .001 - - model = VAE().cuda() - optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE) - loss_fn = torch.nn.MSELoss() - - dataset_base = torchvision.datasets.FashionMNIST("MNIST", download=True, transform=transforms.ToTensor()) - - dataset_base_2 = torchvision.datasets.MNIST("MNIST", download=True, transform=transforms.ToTensor()) - dataset_base = torch.utils.data.ConcatDataset([dataset_base, dataset_base_2]) - - dataset_train, dataset_test = torch.utils.data.random_split( - dataset_base, (int(.8 * len(dataset_base)), int(.2 * len(dataset_base))) - ) - - model.train() - dataloader = torch.utils.data.DataLoader(dataset_train, - batch_size=512, - shuffle=True, - num_workers=0) - i = 0 - for epoch in range(N_EPOCHS): - total_loss = 0 - for x, label in dataloader: - #for j in range(512): - # plt.imsave(f"{i}-{label[j]}.jpg", np.stack([x[j].reshape((28, 28)).detach().numpy()] * 3, -1), cmap='gray') - # i += 1 - #exit() - x = x.cuda() - label = label.cuda() - optimizer.zero_grad() - xhat, mu, logvar = model(x) - - BCE = 
F.binary_cross_entropy(xhat, x.reshape(xhat.shape), reduction='mean') - # https://arxiv.org/abs/1312.6114 - # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) - KLD = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp()) - loss = BCE + KLD - loss.backward() - optimizer.step() - total_loss += loss.item() - print(f"{epoch}: {total_loss:.4f}") - - model.cpu() - with open("vae.pt", "wb") as file: - torch.save(model, file) - model.eval() - dataloader = torch.utils.data.DataLoader(dataset_test, - batch_size=512, - shuffle=True, - num_workers=0) - n_correct = 0 - - COLS = 4 - ROWS = 4 - fig, axes = plt.subplots(ncols=COLS, nrows=ROWS, figsize=(5.5, 3.5), - constrained_layout=True) - - dataloader_gen = iter(dataloader) - x, label = next(dataloader_gen) - xhat, mu, logvar = model(x) - xhat = xhat.reshape((-1, 28, 28)) - for row in range(ROWS): - for col in range(COLS): - axes[row, col].imshow(xhat[row * COLS + col].detach().numpy(), cmap="gray") - plt.show() diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy the most realistic driving physics endless customization and huge open world with Ultimate Car Driving Simulator MOD APK (VIP Unlocked Unlimited Money).md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy the most realistic driving physics endless customization and huge open world with Ultimate Car Driving Simulator MOD APK (VIP Unlocked Unlimited Money).md deleted file mode 100644 index d23da2e43fba31bdc3c4be8dfd93024762fc1419..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy the most realistic driving physics endless customization and huge open world with Ultimate Car Driving Simulator MOD APK (VIP Unlocked Unlimited Money).md +++ /dev/null @@ -1,131 +0,0 @@ -
    -

    Ultimate Car Driving Simulator Mod APK: VIP Unlocked, Unlimited Money

    -

    If you are a fan of racing games and want to experience the thrill of driving realistic cars on various terrains, then you should try Ultimate Car Driving Simulator. This game is one of the best car driving simulators on Android, with a huge open world map, realistic physics and graphics, and customizable cars and garage. However, if you want to enjoy the game to the fullest, you might need some extra resources and features that are not available in the original version. That's why we recommend you to download Ultimate Car Driving Simulator Mod APK, which gives you VIP unlocked, unlimited money, and more. In this article, we will tell you everything you need to know about this mod apk, including its features, pros and cons, and how to download and install it on your device.

    -

    ultimate car driving simulator mod apk vip unlocked unlimited money


    Download File ——— https://urlca.com/2uO7Gg



    -

    Introduction

    -

    Ultimate Car Driving Simulator is a racing game developed by Sir Studios, which has over 100 million downloads on Google Play. The game lets you drive various types of cars, from sports cars to off-road vehicles, on a huge open world map with different locations and environments. You can customize your car's appearance and performance, upgrade and repair it in your garage, and explore the map at your own pace. You can also choose from different game modes, such as free roam, traffic mode, checkpoint mode, or drift mode.

    -

    However, some features and resources in the game are limited or require real money to unlock. For example, you need to watch ads or pay money to unlock some cars or get more money. You also need to earn VIP points to access some exclusive features, such as removing ads, unlocking all cars, getting double rewards, and more. That's why some players prefer to use Ultimate Car Driving Simulator Mod APK, which is a modified version of the game that gives you VIP unlocked, unlimited money, and more. With this mod apk, you can enjoy the game without any restrictions or limitations.

    -

    To download and install Ultimate Car Driving Simulator Mod APK on your device, you need to follow these simple steps:

    -

    * ultimate car simulator modded apk with vip features and unlimited cash
    -* download ultimate car driving simulation mod apk free vip and money
    -* ultimate car driving simulator hack apk vip access and infinite money
    -* ultimate car driving simulator premium mod apk unlimited coins and vip
    -* ultimate car driving simulator cracked apk with vip unlocked and money
    -* ultimate car simulator mod apk free download vip and unlimited money
    -* ultimate car driving simulator cheat apk with infinite money and vip
    -* ultimate car driving simulator full mod apk unlimited money and vip
    -* ultimate car driving simulator latest mod apk vip and money
    -* ultimate car driving simulator pro mod apk with vip features and money
    -* ultimate car simulator hacked apk free vip and unlimited money
    -* ultimate car driving simulator unlocked mod apk with money and vip
    -* ultimate car driving simulator modded game apk vip and unlimited money
    -* ultimate car driving simulator 2020 mod apk with vip and money
    -* ultimate car driving simulator best mod apk unlimited money and vip
    -* ultimate car simulator cheat game apk with vip features and money
    -* ultimate car driving simulator unlimited cash and vip mod apk
    -* ultimate car driving simulator free premium and money mod apk
    -* ultimate car driving simulator infinite coins and vip mod apk
    -* ultimate car driving simulator 2021 mod apk with money and vip
    -* ultimate car simulator cracked game apk with unlimited money and vip
    -* ultimate car driving simulator hack game apk with money and vip
    -* ultimate car driving simulator mod version apk with vip and money
    -* ultimate car driving simulator no ads and money mod apk
    -* ultimate car driving simulator all cars unlocked and money mod apk
    -* ultimate car driving simulator realistic physics and money mod apk
    -* ultimate car driving simulator open world and money mod apk
    -* ultimate car driving simulator custom cars and money mod apk
    -* ultimate car driving simulator high graphics and money mod apk
    -* ultimate car driving simulator offline mode and money mod apk
    -* ultimate car simulator premium version mod apk with money and vip
    -* ultimate car driving simulator latest version mod apk with vip and money
    -* ultimate car driving simulator new update mod apk with money and vip
    -* ultimate car driving simulator online multiplayer mod apk with vip and money
    -* ultimate car driving simulator fun gameplay mod apk with money and vip
    -* ultimate car simulator full version mod apk with unlimited money and vip
    -* ultimate car driving simulator easy controls and money mod apk
    -* ultimate car driving simulator realistic sounds and money mod apk
    -* ultimate car driving simulator fast cars and money mod apk
    -* ultimate car driving simulator racing mode and money mod apk

    -
      -
1. Click on this link to download the mod apk file.
2. Go to your device's settings and enable installation from unknown sources.
3. Locate the downloaded file in your file manager and tap on it to install it (a scripted adb alternative is sketched just after this list).
4. Launch the game and enjoy VIP unlocked, unlimited money, and more.
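If you prefer to sideload the file from a computer instead of tapping through the on-device steps, the same install can be scripted. The sketch below is only an illustration and not part of the original guide: it assumes adb from the Android SDK platform-tools is on your PATH, that USB debugging is enabled on the phone, and that the APK file name shown is a placeholder.

```python
# Minimal sketch: sideloading an APK over adb instead of installing on-device.
# Assumes Android SDK platform-tools (adb) are installed and USB debugging is on.
import subprocess

APK_PATH = "ultimate-car-driving-simulator-mod.apk"  # hypothetical file name

def sideload(apk_path: str) -> None:
    # List connected devices first so a failed install is easier to diagnose.
    subprocess.run(["adb", "devices"], check=True)
    # -r reinstalls over an existing copy while keeping its app data.
    subprocess.run(["adb", "install", "-r", apk_path], check=True)

if __name__ == "__main__":
    sideload(APK_PATH)
```

Either route ends the same way: the app shows up in your app drawer and you can launch it normally.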

    Features of Ultimate Car Driving Simulator Mod APK

    -

    VIP Unlocked

    -

    One of the main features of Ultimate Car Driving Simulator Mod APK is that it gives you VIP unlocked. This means that you can access all the benefits of being a VIP without spending any money or earning any VIP points. Some of these benefits are:

    -
      -
• All cars unlocked: You can drive any car you want without having to unlock it with money or ads.
• Double rewards: You can earn twice as much money and VIP points from completing missions and challenges.
• Exclusive cars: You can access some special cars that are only available for VIPs, such as the Bugatti Chiron, the Lamborghini Veneno, or the Ferrari LaFerrari.

    To activate the VIP mode, you just need to tap on the VIP icon on the top right corner of the screen. You will see a green check mark indicating that you are a VIP. You can also deactivate the VIP mode anytime you want by tapping on the icon again.

    -

    Unlimited Money

    -

    Another feature of Ultimate Car Driving Simulator Mod APK is that it gives you unlimited money. This means that you can buy anything you want in the game without worrying about running out of money. You can use your money to:

    -
      -
• Buy new cars: You can choose from over 80 different cars, from classic cars to supercars, from muscle cars to off-road vehicles.
• Customize your cars: You can change your car's color, paint, wheels, spoilers, exhausts, and more.
• Upgrade your cars: You can improve your car's speed, acceleration, handling, braking, and more.
• Repair your cars: You can fix any damage or wear on your car's body or engine.

    To get unlimited money in the game, you just need to play normally and earn money from completing missions and challenges. You will see that your money will never decrease, no matter how much you spend. You can also use the double rewards feature to get more money faster.

    -

    Realistic Physics and Graphics

    -

    Ultimate Car Driving Simulator Mod APK also enhances the realistic physics and graphics of the game. This means that you can enjoy a more immersive and realistic car driving experience on your device. Some of the aspects that are improved are:

    -
      -
• Physics: The game simulates real car physics, such as gravity, inertia, friction, collision, suspension, and more. You can feel the difference between driving on asphalt, dirt, grass, or snow. You can also perform realistic stunts, such as drifting, jumping, or crashing.
• Graphics: The game offers high-quality graphics options and settings, such as resolution, texture quality, shadow quality, anti-aliasing, and more. You can adjust them according to your device's performance and preference. You can also enjoy realistic lighting effects, such as day and night cycle, sun rays, reflections, or lens flare.

    Huge Open World Map

    -

    Ultimate Car Driving Simulator Mod APK also expands the huge open world map of the game. This means that you can explore and discover more places and environments in the game. Some of the features of the map are:

    -
      -
• Size: The map is one of the biggest and most detailed maps in a car driving simulator game. It covers over 16 square kilometers of land, with different terrains, such as city, desert, forest, mountain, and more.
• Locations: The map has various locations and landmarks that you can visit and interact with, such as airports, bridges, tunnels, gas stations, ramps, loops, and more.
• Challenges: The map also has various challenges and missions that you can complete and earn rewards, such as racing, drifting, jumping, or smashing.

    To explore and discover the huge open world map of the game, you just need to drive around and follow the signs and markers on the map. You can also use the mini-map on the top left corner of the screen to navigate and find your destination. You can also switch between different camera views by tapping on the camera icon on the bottom right corner of the screen.

    -

    Customizable Cars and Garage

    -

    Ultimate Car Driving Simulator Mod APK also enhances the customizable cars and garage of the game. This means that you can personalize your car's appearance and performance according to your taste and style. Some of the features of the cars and garage are:

    -
      -
• Cars: The game has over 80 different cars that you can drive and customize, from classic cars to supercars, from muscle cars to off-road vehicles. Each car has its own characteristics and stats, such as speed, acceleration, handling, braking, and more.
• Customization: The game allows you to customize your car's appearance and performance in various ways. You can change your car's color, paint, wheels, spoilers, exhausts, and more. You can also improve your car's speed, acceleration, handling, braking, and more.
• Garage: The game also has a garage where you can store, upgrade, and repair your cars. You can access your garage by tapping on the garage icon on the bottom left corner of the screen. You can also visit other players' garages and see their cars.

    Pros and Cons of Ultimate Car Driving Simulator Mod APK

    -

    Ultimate Car Driving Simulator Mod APK is a great way to enjoy the game without any limitations or restrictions. However, it also has some drawbacks and risks that you should be aware of before using it. Here are some of the pros and cons of Ultimate Car Driving Simulator Mod APK:

    -

    Pros

    -

    Some of the advantages of using Ultimate Car Driving Simulator Mod APK are:

    -
      -
• You can access all the features and resources of the game for free, such as VIP unlocked, unlimited money, all cars unlocked, and more.
• You can customize your car's appearance and performance in any way you want, without worrying about money or ads.
• You can enjoy a realistic and immersive car driving experience, with realistic physics and graphics, and a huge open world map.
• You can have fun and challenge yourself with various game modes, challenges, and missions.
• You can share your cars and garage with other players and see their cars.

    Cons

    -

    Some of the disadvantages or risks of using Ultimate Car Driving Simulator Mod APK are:

    -
      -
• You might face some compatibility or performance issues on your device, depending on your device's specifications and settings.
• You might encounter some bugs or glitches in the game, such as crashes, freezes, or errors.
• You might lose your progress or data if you uninstall the mod apk or update the game.
• You might get banned or suspended from the game if you use the mod apk online or in multiplayer mode.
• You might violate the terms and conditions of the game or the Google Play Store by using the mod apk.

    Conclusion

    -

    Ultimate Car Driving Simulator is one of the best car driving simulators on Android, with a huge open world map, realistic physics and graphics, and customizable cars and garage. However, if you want to enjoy the game to the fullest, you might need some extra resources and features that are not available in the original version. That's why we recommend you to download Ultimate Car Driving Simulator Mod APK, which gives you VIP unlocked, unlimited money, and more. With this mod apk, you can enjoy the game without any restrictions or limitations. However, you should also be careful of the drawbacks and risks of using the mod apk, such as compatibility issues, bugs, data loss, or bans. Therefore, you should use the mod apk at your own risk and discretion.

    -

    We hope this article has helped you to learn more about Ultimate Car Driving Simulator Mod APK and how to download and install it on your device. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

    -

    FAQs

    -

    Here are some of the frequently asked questions and answers about Ultimate Car Driving Simulator Mod APK:

    -
      -
    1. Is Ultimate Car Driving Simulator Mod APK safe to use?
      Ultimate Car Driving Simulator Mod APK is generally safe to use, as long as you download it from a trusted source and scan it with an antivirus before installing it. However, you should also be aware of the potential risks of using a mod apk, such as compatibility issues, bugs, data loss, or bans. Therefore, you should use the mod apk at your own risk and discretion.
2. Can I play Ultimate Car Driving Simulator Mod APK online or with other players?
      Ultimate Car Driving Simulator Mod APK is mainly designed for offline or single-player mode. However, you can also play online or with other players in some game modes, such as traffic mode or checkpoint mode. However, you should be careful not to use the mod apk online or in multiplayer mode too often or too blatantly, as you might get banned or suspended from the game for cheating or hacking.
3. What are some alternatives to Ultimate Car Driving Simulator Mod APK?
      If you are looking for some other car driving simulator games that you can play on your Android device, you might want to check out these alternatives:

      -
        -
• Real Driving Sim: This game is another realistic car driving simulator game, with over 80 cars, 20 cities, and various game modes. You can also customize your car's appearance and performance, and enjoy realistic physics and graphics.
• Extreme Car Driving Simulator: This game is a fun and exciting car driving simulator game, with over 25 cars, 3 maps, and various game modes. You can also perform stunts, drifts, and crashes, and enjoy realistic physics and graphics.
• Car Simulator 2: This game is a multiplayer car driving simulator game, with over 30 cars, 3 maps, and various game modes. You can also interact with other players, join races and clubs, and enjoy realistic physics and graphics.
4. How can I contact the developers of Ultimate Car Driving Simulator Mod APK?
      If you have any questions, feedback, or issues regarding Ultimate Car Driving Simulator Mod APK, you can contact the developers of the mod apk by visiting their website or sending them an email at . You can also follow them on their social media accounts to get the latest news and updates about the mod apk.

    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Get Geometry Dash Meltdown Mod APK for Free on Android Devices.md b/spaces/congsaPfin/Manga-OCR/logs/How to Get Geometry Dash Meltdown Mod APK for Free on Android Devices.md deleted file mode 100644 index 193b4b40171f31529877fc7fb77fb15a63e4f815..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to Get Geometry Dash Meltdown Mod APK for Free on Android Devices.md +++ /dev/null @@ -1,85 +0,0 @@ - -

    Geometry Dash Meltdown Mod APK: A Fun and Challenging Platformer Game

    -

    If you are looking for a game that can test your reflexes, skills, and patience, then you should try Geometry Dash Meltdown. This is a platformer game that will make you jump, fly, and flip your way through various levels filled with spikes, fire, monsters, and other obstacles. You will need to time your moves perfectly to avoid crashing and losing your progress. Geometry Dash Meltdown is a game that will keep you entertained and challenged for hours.

    -

    free download geometry dash meltdown mod apk


    Download File ››› https://urlca.com/2uO7bp



    -

    What is Geometry Dash Meltdown?

    -

    Geometry Dash Meltdown is a spin-off of the popular Geometry Dash series by RobTop Games. It was released in 2015 as a free game for Android and iOS devices. The game features three exclusive levels: The Seven Seas, Viking Arena, and Airborne Robots. Each level has its own theme, music, and difficulty. You can also create your own levels using the level editor and share them with other players online.

    -

    Features of Geometry Dash Meltdown

    -

    Unique and colorful graphics

    -

    One of the most appealing aspects of Geometry Dash Meltdown is its graphics. The game has a vibrant and neon-like style that suits the fast-paced and energetic gameplay. The backgrounds, obstacles, and characters are all designed with geometric shapes and patterns that create a visually stunning effect. You will never get bored of the game's aesthetics as they change with every level.

    -

    Dynamic and rhythmic music

    -

Another feature that makes Geometry Dash Meltdown stand out is its music. The game has an original soundtrack composed by F-777, a famous electronic music producer. The music matches the mood and tempo of each level, creating an immersive and exhilarating experience. You will feel the adrenaline rush as you jump and dash to the beat of the music.

    -

    Simple and intuitive controls

    -

    The game has very simple and intuitive controls that anyone can learn in seconds. You only need to tap the screen to make your character jump, fly, or flip. You can also hold the screen to perform continuous jumps or flights. The game does not require any complicated gestures or buttons to play. However, do not let the simplicity fool you. The game is very hard to master as it requires precise timing and coordination.

    -

    Various levels and obstacles

    -

    The game has three official levels that you can play: The Seven Seas, Viking Arena, and Airborne Robots. Each level has its own theme, music, and difficulty. You will face different obstacles such as spikes, fireballs, saws, lasers, monsters, portals, gravity switches, and more. You will need to overcome these challenges to reach the end of each level. You can also play user-generated levels that are created by other players using the level editor.

    -

    Customizable characters and icons

    -

    The game allows you to customize your character and icon with different colors, shapes, faces, trails, and effects. You can unlock these options by collecting stars, coins, diamonds, keys, shards, orbs, and other items in the game. You can also use these items to unlock new game modes such as practice mode, gauntlets mode, map packs mode, quests mode, daily levels mode, weekly demon mode, hall of fame mode, vaults mode, shops mode, secret rooms mode and more.

    -

    free geometry dash meltdown apk mod unlocked
    -download geometry dash meltdown mod apk latest version
    -geometry dash meltdown hack mod apk free download
    -free download geometry dash meltdown full version mod apk
    -geometry dash meltdown mod apk download for android
    -how to download geometry dash meltdown mod apk for free
    -geometry dash meltdown unlimited coins mod apk free download
    -free download geometry dash meltdown premium mod apk
    -geometry dash meltdown mod apk no ads free download
    -download geometry dash meltdown mod apk all levels unlocked
    -geometry dash meltdown mega mod apk free download
    -free download geometry dash meltdown hacked apk mod
    -geometry dash meltdown mod apk offline free download
    -download geometry dash meltdown mod apk unlimited money
    -geometry dash meltdown pro mod apk free download
    -free download geometry dash meltdown cheat mod apk
    -geometry dash meltdown mod apk android 1 free download
    -download geometry dash meltdown mod apk revdl
    -geometry dash meltdown vip mod apk free download
    -free download geometry dash meltdown cracked mod apk
    -geometry dash meltdown mod apk 2.2.11 free download
    -download geometry dash meltdown mod apk rexdl
    -geometry dash meltdown plus mod apk free download
    -free download geometry dash meltdown extreme mod apk
    -geometry dash meltdown mod apk 2023 free download

    -

    Leaderboards and achievements

    -

    The game has online leaderboards where you can compare your scores and rankings with other players from around the world. You can also earn achievements by completing various tasks and challenges in the game. You can view your stats and progress in the game menu. You can also sync your data with your Google Play or Game Center account to save your progress and access it on different devices.

    -

    Why download Geometry Dash Meltdown Mod APK?

    -

    Geometry Dash Meltdown is a fun and challenging game that will keep you hooked for hours. However, it can also be frustrating and annoying at times. You may encounter ads and pop-ups that interrupt your gameplay. You may also find some levels and features locked or restricted unless you pay real money or watch videos. You may also run out of lives or energy that limit your playtime.

    -

    That is why you should download Geometry Dash Meltdown Mod APK. This is a modified version of the game that gives you access to all the levels and features for free. You can also enjoy the game without any ads or distractions. You can also play the game as much as you want without worrying about lives or energy. You can have unlimited fun and challenge with Geometry Dash Meltdown Mod APK.

    -

    How to download and install Geometry Dash Meltdown Mod APK?

    -

    Step-by-step guide

    -

    If you want to download and install Geometry Dash Meltdown Mod APK, you can follow these simple steps:

    -
      -
1. Go to a trusted and reliable website that offers Geometry Dash Meltdown Mod APK. You can search for it on Google or use the link provided below.
2. Download the APK file to your device. Make sure you have enough storage space and a stable internet connection (a quick checksum check you can run on the downloaded file is sketched right after this list).
3. Before installing the APK file, enable the "Unknown Sources" option so your device can install apps from outside the official app store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
4. Locate the downloaded APK file on your device using a file manager app or your browser's downloads folder. Tap on it to start the installation process.
5. Follow the instructions on the screen to complete the installation. It may take a few seconds or minutes depending on your device's performance.
6. Once the installation is done, you can launch the game from your app drawer or home screen. Enjoy playing Geometry Dash Meltdown Mod APK!
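Step 2 tells you to fetch the APK only from a site you trust; one cheap sanity check before installing is to compare the file's SHA-256 hash against whatever checksum the download page publishes. The sketch below is illustrative only and is not part of the original guide: the file name and expected digest are placeholders, and not every site publishes a checksum at all.

```python
# Minimal sketch: verify a downloaded APK against a published SHA-256 checksum.
# The file name and expected digest are placeholders, not real values.
import hashlib

APK_PATH = "geometry-dash-meltdown-mod.apk"  # hypothetical file name
EXPECTED_SHA256 = "paste-the-checksum-from-the-download-page-here"

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in chunks so large APKs do not have to fit in memory at once.
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    actual = sha256_of(APK_PATH)
    print("computed:", actual)
    print("match" if actual == EXPECTED_SHA256 else "MISMATCH - do not install")
```

If the hashes do not match, re-download the file or pick a different source before going any further.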
    -

    Tips and tricks for playing Geometry Dash Meltdown

    -

    If you want to improve your skills and performance in Geometry Dash Meltdown, you can use these tips and tricks:

    -
      -
    • Practice makes perfect. The game is very hard and requires a lot of trial and error. You will need to play each level multiple times to memorize the patterns and timings of the obstacles. You can also use the practice mode to practice each level without losing lives or progress.
    • -
    • Use headphones. The game's music is not only catchy but also helpful. The music syncs with the gameplay and gives you cues on when to jump or dash. Using headphones will help you hear the music better and focus on the game.
    • -
    • Customize your character and icon. The game allows you to change the appearance of your character and icon with different colors, shapes, faces, trails, and effects. You can use these options to make your character and icon more visible and distinctive on the screen. This will help you avoid confusion and mistakes.
    • -
    • Watch videos of other players. You can learn a lot from watching how other players play each level. You can see how they avoid obstacles, collect items, and complete challenges. You can also get inspired by their creativity and style. You can watch videos of other players on YouTube or in the game's community section.
    • -
    • Have fun and don't give up. The game is meant to be fun and challenging, not frustrating and stressful. Don't let the game get to you if you fail or lose. Instead, try again and learn from your mistakes. Remember that every failure is an opportunity to improve and succeed.
    • -
    -

    Conclusion

    -

    Geometry Dash Meltdown is a platformer game that will test your reflexes, skills, and patience. It has unique and colorful graphics, dynamic and rhythmic music, simple and intuitive controls, various levels and obstacles, customizable characters and icons, leaderboards and achievements, and more. You can also download Geometry Dash Meltdown Mod APK to unlock all the levels and features, remove ads and distractions, and enjoy unlimited fun and challenge. You can download and install the game easily by following the step-by-step guide provided in this article. You can also use the tips and tricks to improve your skills and performance in the game. Geometry Dash Meltdown is a game that will make you addicted and satisfied.

    -

    FAQs

    -

    Here are some frequently asked questions about Geometry Dash Meltdown and Geometry Dash Meltdown Mod APK:

    -
      -
    1. What is the difference between Geometry Dash Meltdown and Geometry Dash?
    2. -

      Geometry Dash Meltdown is a spin-off of Geometry Dash, a popular platformer game by RobTop Games. Geometry Dash Meltdown has three exclusive levels that are not available in Geometry Dash. Geometry Dash Meltdown is also free to play, while Geometry Dash costs $1.99 to download.

      -
    3. Is Geometry Dash Meltdown Mod APK safe to use?
    4. -

      Yes, Geometry Dash Meltdown Mod APK is safe to use as long as you download it from a trusted and reliable website. You should also scan the APK file with an antivirus app before installing it on your device. However, you should be aware that using modded apps may violate the terms and conditions of the original game and may result in bans or penalties.

      -
    5. How can I update Geometry Dash Meltdown Mod APK?
    6. -

      To update Geometry Dash Meltdown Mod APK, you need to download the latest version of the modded app from the same website where you downloaded the previous version. You can also check for updates on the website regularly or subscribe to their notifications. You should also uninstall the old version of the modded app before installing the new one.

      -
    7. How can I backup my progress in Geometry Dash Meltdown?
    8. -

      To backup your progress in Geometry Dash Meltdown, you need to create an account in the game and sync your data with your Google Play or Game Center account. You can also use cloud backup services such as Google Drive or Dropbox to save your data. You should backup your progress regularly to avoid losing it in case of data corruption or device loss.

      -
    9. How can I contact the developers of Geometry Dash Meltdown?
    10. -

      To contact the developers of Geometry Dash Meltdown, you can visit their official website at https://www.robtopgames.com/ or follow them on their social media accounts such as Facebook, Twitter, Instagram, YouTube, and Reddit. You can also email them at support@robtopgames.com or leave a comment or review on their app store page.

      -

    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Leaf Green version game APK - The Most Authentic Pokemon Emulator for Android.md b/spaces/congsaPfin/Manga-OCR/logs/Leaf Green version game APK - The Most Authentic Pokemon Emulator for Android.md deleted file mode 100644 index aba0d4138c1a430cf8e5bf247e30a0a2391e95aa..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Leaf Green version game APK - The Most Authentic Pokemon Emulator for Android.md +++ /dev/null @@ -1,142 +0,0 @@ - -

    How to Download Leaf Green APK for Android

    -

    If you are a fan of Pokemon games, you might have heard of Leaf Green APK, a remake of the classic Pokemon game that was released for the Game Boy Advance in 2004. Leaf Green APK is an Android app that lets you play this nostalgic game on your smartphone or tablet. In this article, we will show you what Leaf Green APK is, how to download and install it on your Android device, and how to play it on your PC or Mac using an Android emulator.

    -

    download leaf green apk


    Download ✏ ✏ ✏ https://urlca.com/2uOeVd



    -

    What is Leaf Green APK?

    -

    Leaf Green APK is an Android app that contains the Pokemon Leaf Green game, which is a remake of the original Pokemon Red and Green games that were released for the Game Boy in 1996. The game follows the same plot and gameplay as the original games, but with enhanced graphics, smoother game-play, and new features introduced in Generation III of Pokemon games.

    -

    A remake of the classic Pokemon game

    -

    In Leaf Green APK, you play as a young Pokemon trainer who sets out on a journey across the Kanto region to catch and train Pokemon, battle other trainers, and challenge the eight Gym Leaders and the Elite Four. Along the way, you will encounter Team Rocket, a criminal organization that wants to use Pokemon for evil purposes. You will also have a rival who will challenge you throughout your adventure.

    -

    Features and benefits of Leaf Green APK

    -

    Leaf Green APK has many features and benefits that make it a great choice for Pokemon fans. Some of them are:

    -
      -
• You can choose from three starter Pokemon: Bulbasaur, Charmander, or Squirtle.
• You can catch and train over 150 different Pokemon, including some from Generation II and III.
• You can use items, abilities, moves, and strategies that were not available in the original games.
• You can trade and battle with other players using the Game Link Cable or the Wireless Adapter.
• You can access new areas and mini-games, such as the Sevii Islands and the Trainer Tower.
• You can enjoy the nostalgic music and sound effects from the original games.

    How to download and install Leaf Green APK on your Android device

    -

    If you want to play Leaf Green APK on your Android device, you will need to download and install it manually, as it is not available on the Google Play Store. Here are the requirements and compatibility, steps to download and install, and tips and tricks to enjoy Leaf Green APK on your Android device.

    -

    Requirements and compatibility

    -

    To download and install Leaf Green APK on your Android device, you will need:

    -
      -
• An Android device running Android 4.0 or higher.
• At least 28 MB of free storage space on your device or SD card.
• A stable internet connection to download the APK file.
• A file manager app to locate and install the APK file.
• Installation of apps from unknown sources enabled in your device settings.

    Steps to download and install Leaf Green APK

    -

    To download and install Leaf Green APK on your Android device, follow these steps:

    -

    download pokemon leaf green apk for android
    -download leaf green version game apk
    -download leaf green gba emulator apk
    -download leaf green apk mod
    -download leaf green apk offline
    -download leaf green apk latest version
    -download leaf green apk no root
    -download leaf green apk with cheats
    -download leaf green apk english
    -download leaf green apk full version
    -download leaf green apk free fire
    -download leaf green apk hack
    -download leaf green apk unlimited money
    -download leaf green apk 2023
    -download leaf green apk update
    -download leaf green apk obb
    -download leaf green apk data
    -download leaf green apk file
    -download leaf green apk android 11
    -download leaf green apk android 10
    -download leaf green apk android 9
    -download leaf green apk android 8
    -download leaf green apk android 7
    -download leaf green apk android 6
    -download leaf green apk android 5
    -download leaf green apk for pc
    -download leaf green apk for ios
    -download leaf green apk for windows 10
    -download leaf green apk for windows 7
    -download leaf green apk for mac
    -download leaf green apk for laptop
    -download leaf green apk for tablet
    -download leaf green apk for chromebook
    -download leaf green apk for firestick
    -download leaf green apk for smart tv
    -how to download leaf green apk on android
    -how to download leaf green apk on pc
    -how to download leaf green apk on ios
    -how to download leaf green apk on windows 10
    -how to download leaf green apk on windows 7
    -how to download leaf green apk on mac
    -how to download leaf green apk on laptop
    -how to download leaf green apk on tablet
    -how to download leaf green apk on chromebook
    -how to download leaf green apk on firestick
    -how to download leaf green apk on smart tv
    -where to download leaf green apk for free
    -where to download leaf green apk safely
    -where to download leaf green apk without virus
    -where to download leaf green apk without ads

    -
      -
1. Go to this link on your browser and tap on the Download APK (28 MB) button.
2. Wait for the download to finish and locate the downloaded file using your file manager app.
3. Tap on the file and select Install when prompted.
4. Wait for the installation to complete and tap on Open to launch the app.

Tips and tricks to enjoy Leaf Green APK

      Now that you have downloaded and installed Leaf Green APK on your Android device, you can start playing and enjoying this classic Pokemon game. Here are some tips and tricks to help you get the most out of Leaf Green APK:

      -
        -
      • Save your game frequently, especially before important battles or events.
      • -
      • Use the fast forward feature to speed up the game-play and skip boring parts.
      • -
      • Use the cheat codes to get unlimited items, money, or rare Pokemon.
      • -
      • Explore every area and talk to every NPC to find hidden secrets and rewards.
      • -
      • Use the online features to trade and battle with other players around the world.
      • -
      -

      How to play Leaf Green APK on your PC or Mac

      -

      If you want to play Leaf Green APK on a bigger screen and with better controls, you can also play it on your PC or Mac using an Android emulator. An Android emulator is a software that mimics the Android operating system on your computer, allowing you to run Android apps and games on your PC or Mac. In this section, we will show you what an Android emulator is, the best Android emulators for PC and Mac, and the steps to play Leaf Green APK on your PC or Mac using an Android emulator.

      -

      What is an Android emulator?

      -

      An Android emulator is a software that creates a virtual Android device on your computer, allowing you to run Android apps and games on your PC or Mac. An Android emulator can have different features and functions, such as:

      -
        -
      • Customizing the screen size, resolution, and orientation of the virtual device.
      • -
      • Mapping the keyboard and mouse to control the virtual device.
      • -
      • Accessing the Google Play Store and other app stores on the virtual device.
      • -
      • Recording and capturing the screen of the virtual device.
      • -
      • Sharing files and data between the virtual device and your computer.
      • -
      -

      The best Android emulators for PC and Mac

      -

      There are many Android emulators available for PC and Mac, but not all of them are compatible with Leaf Green APK. Some of them may have issues with performance, graphics, sound, or online features. Therefore, it is important to choose an Android emulator that works well with Leaf Green APK. Here are some of the best Android emulators for PC and Mac that we recommend:

      - - - - - - - -
      NameDescriptionDownload Link
      NoxPlayerA fast and stable Android emulator that supports high-performance gaming and multiple instances.NoxPlayer Download
      BlueStacksA popular and powerful Android emulator that offers advanced features and a user-friendly interface.BlueStacks Download
      LDPlayerA lightweight and smooth Android emulator that focuses on gaming performance and compatibility.LDPlayer Download
      MEmu PlayA versatile and flexible Android emulator that supports multiple Android versions and devices.MEmu Play Download
      KOPlayerA simple and efficient Android emulator that offers a clean and minimalistic interface.KOPlayer Download
      -

      Steps to play Leaf Green APK on your PC or Mac using an Android emulator

      -

      To play Leaf Green APK on your PC or Mac using an Android emulator, follow these steps:

      -
        -
1. Download and install an Android emulator of your choice from the links above.
2. Launch the Android emulator and sign in with your Google account.
3. Download Leaf Green APK from this link on your browser.
4. Drag and drop the downloaded file into the Android emulator window or use the file manager app to locate and install it.
5. Launch Leaf Green APK from the app drawer or home screen of the Android emulator.
6. Enjoy playing Leaf Green APK on your PC or Mac with better graphics and controls.

      Conclusion

      -

      In conclusion, Leaf Green APK is an amazing app that lets you play one of the best Pokemon games ever made on your Android device or PC or Mac. It has all the features and benefits of the original game, plus some new ones that make it more enjoyable and convenient. If you want to download and install Leaf Green APK on your Android device or PC or Mac, you can follow the steps we have provided in this article. We hope you have fun playing Leaf Green APK and reliving your childhood memories of Pokemon.

      FAQs

      -

      Here are some of the frequently asked questions about Leaf Green APK:

      -
        -
      • Q: Is Leaf Green APK safe and legal to use?
        -A: Leaf Green APK is safe and virus-free to use, as long as you download it from a trusted source. However, it is not legal to use Leaf Green APK if you do not own the original game or have the permission of the game developer.
      • -
      • Q: How can I save my progress in Leaf Green APK?
        -A: You can save your progress in Leaf Green APK by using the in-game save feature or the emulator's save state feature. The in-game save feature lets you save your game at any Pokemon Center or on the world map. The emulator's save state feature lets you save your game at any point and load it later.
      • -
      • Q: How can I transfer my save data from Leaf Green APK to another device?
        -A: You can transfer your save data from Leaf Green APK to another device by copying the save file from your device's storage or SD card to the other device's storage or SD card. The save file is usually located in the folder named "LeafGreen" or "com.nintendo.pokemonleafgreen".
      • -
      • Q: How can I update Leaf Green APK to the latest version?
        -A: You can update Leaf Green APK to the latest version by downloading and installing the new APK file from the same source you downloaded it from. However, you should back up your save data before updating, as some updates may cause compatibility issues or data loss.
      • -
      • Q: How can I contact the developer of Leaf Green APK?
        -A: You can contact the developer of Leaf Green APK by visiting their website or social media pages. However, please note that Leaf Green APK is not an official app from Nintendo or Game Freak, and they may not respond to your queries or requests.
      • -

      \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Solitaire Hack Mod APK How to Play the Classic Card Game with More Fun.md b/spaces/congsaPfin/Manga-OCR/logs/Solitaire Hack Mod APK How to Play the Classic Card Game with More Fun.md deleted file mode 100644 index d0372a2891274fd87c57308f41777fe1be1c5fe9..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Solitaire Hack Mod APK How to Play the Classic Card Game with More Fun.md +++ /dev/null @@ -1,111 +0,0 @@ -
      -

      Solitaire Hack Mod APK: How to Play and Enjoy the Classic Card Game

      -

      Introduction

      -

      Solitaire is one of the most popular card games in the world. It is a game that can be played by anyone, anywhere, anytime. It is a game that challenges your mind, relaxes your mood, and entertains you for hours. But what if you want to spice up your solitaire experience with some extra features and options? That's where solitaire hack mod apk comes in.

      -

      solitaire hack mod apk


      Downloadhttps://urlca.com/2uO7hB



      -

      Solitaire hack mod apk is a modified version of the classic solitaire game that you can download and install on your Android device. It offers you unlimited money, unlimited hints, unlimited undos, no ads, and more. It also lets you customize the appearance of the cards, the background, and the sound effects. With solitaire hack mod apk, you can enjoy the classic card game like never before.

      -

      How to download and install solitaire hack mod apk

      -

      Step 1: Find a reliable source for the apk file

      -

      The first thing you need to do is to find a trustworthy website that provides the solitaire hack mod apk file. You can use a search engine like Google or Bing to look for it, or you can use one of the links below:

      - -

      Make sure you check the reviews and ratings of the website before downloading anything. Also, avoid clicking on any suspicious ads or pop-ups that may appear on the site.

      -

      Step 2: Enable unknown sources on your device

      -

      The next thing you need to do is to allow your device to install apps from unknown sources. This is because solitaire hack mod apk is not available on the official Google Play Store, so you need to enable this option to install it manually. To do this, follow these steps:

      -
        -
1. Go to your device's settings and look for security or privacy options.
2. Find the option that says "Unknown sources" or "Install unknown apps" and toggle it on.
3. A warning message may pop up, asking you to confirm your choice. Tap on "OK" or "Allow".

      Now you are ready to install the solitaire hack mod apk file on your device.

      -

      solitaire mod apk unlimited money
      -solitaire hack apk download
      -solitaire mod apk no ads
      -solitaire hack apk android
      -solitaire mod apk latest version
      -solitaire hack apk ios
      -solitaire mod apk offline
      -solitaire hack apk 2023
      -solitaire mod apk premium
      -solitaire hack apk free
      -solitaire mod apk unlocked
      -solitaire hack apk online
      -solitaire mod apk pro
      -solitaire hack apk unlimited coins
      -solitaire mod apk revdl
      -solitaire hack apk 2022
      -solitaire mod apk happymod[^1^]
      -solitaire hack apk rexdl
      -solitaire mod apk vip
      -solitaire hack apk no root
      -solitaire mod apk ad free
      -solitaire hack apk all unlocked
      -solitaire mod apk full version
      -solitaire hack apk cheat
      -solitaire mod apk 10.6.0[^1^]
      -solitaire hack apk 4.16.3141.1[^1^]
      -solitaire mod apk microsoft[^1^]
      -solitaire hack apk classic
      -solitaire mod apk spider
      -solitaire hack apk klondike
      -solitaire mod apk tripeaks
      -solitaire hack apk pyramid
      -solitaire mod apk freecell
      -solitaire hack apk daily challenges[^1^]
      -solitaire mod apk unlimited hints
      -solitaire hack apk unlimited stars[^1^]
      -solitaire mod apk 2023 download
      -solitaire hack apk 2022 download
      -solitaire mod apk android 1
      -solitaire hack apk android oyun club
      -solitaire mod apk android republic
      -solitaire hack apk an1.com
      -solitaire mod apk apkpure
      -solitaire hack apk apkmody.io[^1^]
      -solitaire mod apk apkmirror[^1^]
      -solitaire hack apk apknite.com[^1^]
      -solitaire mod apk apksfree.com[^1^]
      -solitaire hack apk apktada.com[^1^]

      -

      Step 3: Download and install the apk file

      -

      The final thing you need to do is to download and install the solitaire hack mod apk file on your device. To do this, follow these steps:

      -
        -
1. Go back to the website where you found the apk file and tap on the download button.
2. Wait for the download to finish, then locate the file in your device's storage.
3. Tap on the file and follow the instructions on the screen to install it.
4. Once the installation is complete, launch the app and start playing solitaire hack mod apk.
      -

      How to play solitaire hack mod apk

      -

      The rules of the game

      -

The rules of solitaire hack mod apk are the same as the original solitaire game. You have to arrange the cards in four piles, one for each suit, from ace to king. You can move cards from the tableau (the seven columns at the bottom) to the foundation (the four piles at the top) or within the tableau. You can only move cards that are face up and follow a descending order and an alternating color. You can also draw cards from the stock (the pile at the top left) and place them on the waste (the pile next to the stock) or on the tableau. You win the game when you complete all four piles in the foundation.
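The tableau rule just described (face-up cards, descending rank, alternating colors) is easy to express in code. The sketch below is only an illustration of that rule written for this article; it is not taken from the game or the mod.

```python
# Minimal sketch of the tableau rule described above: a card may be placed on
# another tableau card only if it is one rank lower and the opposite color.
RANKS = ["A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K"]
RED_SUITS = {"hearts", "diamonds"}

def is_red(suit: str) -> bool:
    return suit in RED_SUITS

def can_stack_on_tableau(moving: tuple, target: tuple) -> bool:
    """moving and target are (rank, suit) pairs, e.g. ('9', 'spades')."""
    moving_rank, moving_suit = moving
    target_rank, target_suit = target
    one_lower = RANKS.index(moving_rank) + 1 == RANKS.index(target_rank)
    opposite_color = is_red(moving_suit) != is_red(target_suit)
    return one_lower and opposite_color

# Example: a red 9 can go on a black 10, but not on a red 10.
assert can_stack_on_tableau(("9", "hearts"), ("10", "spades"))
assert not can_stack_on_tableau(("9", "hearts"), ("10", "diamonds"))
```

The foundation works the other way around: each pile builds up from ace to king within a single suit, which is why freeing the aces early matters so much.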

      -

      The tips and tricks for winning

      -

      Playing solitaire hack mod apk can be fun and easy, especially with the unlimited money, hints, and undos that you get. However, if you want to challenge yourself and improve your skills, here are some tips and tricks that you can use:

      • Plan ahead: Before you make a move, think about how it will affect the rest of the game. Try to avoid moves that will block your access to other cards or create empty spaces that you cannot fill.
      • Use the hints wisely: The hints can help you find possible moves that you may have missed, but they can also spoil the fun of the game. Use them sparingly and only when you are really stuck (a toy hint-finder is sketched just after this list).
      • Use the undos carefully: The undos can help you undo a mistake or try a different strategy, but they can also make you lose track of your progress. Use them only when necessary and not as a way to cheat.
      • Focus on the aces: The aces are the most important cards in the game, as they start the foundation piles. Try to free them as soon as possible and move them to the foundation.
      • Build down in color: When moving cards within the tableau, try to build down in color, meaning that you place cards of the same suit on top of each other. This will make it easier to move them to the foundation later.
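
      As a companion to the hints tip, here is a toy hint-finder (an illustration, not the app's actual hint logic) that scans the tops of the tableau piles for any legal move, using the same rank and color conventions as the earlier sketch.

```python
RANKS = ["A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K"]
RED = {"hearts", "diamonds"}

def legal_tableau_move(moving, target):
    """Descending rank and alternating color, as in the rules above."""
    return (RANKS.index(moving[0]) == RANKS.index(target[0]) - 1
            and (moving[1] in RED) != (target[1] in RED))

def find_hint(tableau):
    """Return (from_pile, to_pile) for the first legal move between pile tops, or None."""
    for i, src in enumerate(tableau):
        if not src:
            continue
        for j, dst in enumerate(tableau):
            if i != j and dst and legal_tableau_move(src[-1], dst[-1]):
                return i, j
    return None

# Tiny example: the 6 of clubs (pile 0) can move onto the 7 of hearts (pile 1).
example = [[("6", "clubs")], [("7", "hearts")], []]
print(find_hint(example))  # -> (0, 1)
```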

      The benefits of playing solitaire

      -

      Solitaire is not only a fun and relaxing game, but also a beneficial one. Playing solitaire can help you improve your mental abilities, such as memory, concentration, logic, and problem-solving. It can also help you reduce stress, boredom, and anxiety. Solitaire is a game that you can play anytime, anywhere, and at your own pace. It is a game that can keep you entertained for hours and make you feel good.

      -

      Conclusion

      -

      Solitaire hack mod apk is a great way to enjoy the classic card game with some extra features and options. You can download and install it on your Android device easily and safely. You can play it with unlimited money, hints, undos, no ads, and more. You can also customize the appearance of the game according to your preferences. Solitaire hack mod apk is a game that can challenge your mind, relax your mood, and entertain you for hours.

      -

      If you are looking for a new and exciting way to play solitaire, then solitaire hack mod apk is the perfect choice for you. Download it now and start playing!

      -

      FAQs

      • Q: Is solitaire hack mod apk safe to use?
      • A: Yes, solitaire hack mod apk is safe to use as long as you download it from a reliable source and follow the instructions carefully. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before using them.
      • Q: Is solitaire hack mod apk compatible with all Android devices?
      • A: Solitaire hack mod apk should work on most Android devices that run on Android 4.4 or higher. However, some devices may have compatibility issues or performance problems due to different specifications or settings.
      • Q: How do I update solitaire hack mod apk?
      • A: Solitaire hack mod apk does not have an automatic update feature, so you need to check for updates manually. You can do this by visiting the website where you downloaded the apk file and looking for new versions or updates. Alternatively, you can uninstall the app and install it again with the latest version.
      • Q: How do I uninstall solitaire hack mod apk?
      • A: Solitaire hack mod apk can be uninstalled like any other app on your device. You can do this by going to your device's settings and looking for apps or applications options. Then find solitaire hack mod apk in the list of installed apps and tap on it. Then tap on the uninstall or remove option and confirm your choice.
      • Q: How do I contact the developer of solitaire hack mod apk?
      • A: Solitaire hack mod apk is developed by Microsoft Corporation, the same company that created the original solitaire game. You can contact them by visiting their official website at https://www.microsoft.com/en-us/ or by sending them an email at solitaire@microsoft.com.

      -
      -
      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/Hercules-Dwayne-Johnson-1080p-Torrent.md b/spaces/contluForse/HuggingGPT/Hercules-Dwayne-Johnson-1080p-Torrent.md deleted file mode 100644 index 0e31fe207ba34f19b2d051cb7ba252cdf6943cd9..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/Hercules-Dwayne-Johnson-1080p-Torrent.md +++ /dev/null @@ -1,84 +0,0 @@ -## Hercules Dwayne Johnson 1080p Torrent - - - - - - ![Hercules Dwayne Johnson 1080p Torrent](https://2.bp.blogspot.com/-t8x29oWhQ_w/UmENhQFHYdI/AAAAAAAAFIc/jnt-ktRJAjo/s1600/fast-and-furious-7-poster-hd-wallpaper.jpg) - - - - - -**Download • [https://www.google.com/url?q=https%3A%2F%2Furllie.com%2F2txoJ9&sa=D&sntz=1&usg=AOvVaw2Sp9Y5XV38y95cyY6TpTb6](https://www.google.com/url?q=https%3A%2F%2Furllie.com%2F2txoJ9&sa=D&sntz=1&usg=AOvVaw2Sp9Y5XV38y95cyY6TpTb6)** - - - - - - - - - - - - - -# Hercules (2014): A Mythical Adventure Starring Dwayne Johnson - - - -Hercules is a 2014 action-adventure film based on the legendary Greek hero, starring Dwayne Johnson as the titular character. The film follows Hercules and his band of mercenaries as they are hired by the King of Thrace to train his army and defeat a tyrannical warlord. Along the way, Hercules must face his past and his true identity as the son of Zeus. - - - -The film is directed by Brett Ratner and also features Ian McShane, John Hurt, Rufus Sewell, Joseph Fiennes, and Irina Shayk in supporting roles. The film is loosely based on the graphic novel Hercules: The Thracian Wars by Steve Moore and Admira Wijaya. - - - -Hercules was released on July 25, 2014 in the United States and received mixed reviews from critics and audiences. The film grossed $244 million worldwide against a budget of $100 million. - - - -If you are a fan of action-packed movies with epic battles, stunning visuals, and charismatic performances, you might want to check out Hercules (2014) in high definition. You can download the movie torrent in 1080p or 3D from various sources online. However, be aware that downloading torrents may be illegal in your country and may expose you to malware or viruses. Always use a VPN when downloading torrents to protect your privacy and security. - - - -Here are some links where you can find Hercules (2014) movie torrent in 1080p or 3D: - - - -- YTS: [https://yts.do/movie/hercules-2014/](https://yts.do/movie/hercules-2014/) [^1^] - -- Forillowarez: [https://forillowarez.com/threads/h%C3%A9rcules-full-bluray-1080p-cast-ing-fr-ale-it-sub-varios-aventuras-2014.18758/](https://forillowarez.com/threads/h%C3%A9rcules-full-bluray-1080p-cast-ing-fr-ale-it-sub-varios-aventuras-2014.18758/) [^2^] - -- Filmespiratas: [https://filmespiratas.blogspot.com/2014/06/hercules-download-dublado.html](https://filmespiratas.blogspot.com/2014/06/hercules-download-dublado.html) [^3^] - - - -Enjoy watching Hercules (2014) and let us know what you think of the movie in the comments below! - - - -Hercules (2014) is not the only movie adaptation of the Greek hero's story. There have been many versions of Hercules on the big screen and on television, ranging from serious to comedic, from faithful to creative. Here are some of the most notable ones: - - - -1. Hercules (1958): This is the first film in a series of Italian peplum movies starring Steve Reeves as Hercules. The film follows Hercules as he helps Jason and the Argonauts in their quest for the Golden Fleece. The film was a huge success and spawned several sequels and spin-offs. - -2. 
Hercules: The Legendary Journeys (1995-1999): This is a popular television series starring Kevin Sorbo as Hercules and Michael Hurst as his sidekick Iolaus. The series mixes Greek mythology with humor and adventure, and features many guest stars from other fantasy shows. The series also spawned a spin-off, Xena: Warrior Princess. - -3. Hercules (1997): This is an animated musical comedy film produced by Disney, featuring the voice of Tate Donovan as Hercules and James Woods as Hades. The film reimagines Hercules as a naive and clumsy teenager who must prove himself as a true hero to reclaim his godhood. The film features songs by Alan Menken and David Zippel. - -4. The Legend of Hercules (2014): This is another action-adventure film released in the same year as Hercules (2014), starring Kellan Lutz as Hercules and Gaia Weiss as Hebe. The film focuses on Hercules' origin story as he rebels against his stepfather King Amphitryon and falls in love with Hebe, the princess of Crete. - - - -Which version of Hercules do you prefer? Do you think there will be more movies about him in the future? Share your thoughts with us! - - 1b8d091108 - - - - - diff --git a/spaces/contluForse/HuggingGPT/assets/Browse Mac App Store.md b/spaces/contluForse/HuggingGPT/assets/Browse Mac App Store.md deleted file mode 100644 index f48a9299b7b33073a8105b1cba20d37c03ca0e33..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Browse Mac App Store.md +++ /dev/null @@ -1,25 +0,0 @@ -
      -

      Of course, because this App Store app is not affiliated with Apple, clicking the button to open more info on an app launches its browser-based iTunes page. From there, you can either launch iTunes or switch to your iOS device and search for that specific app.

      -

      Browse Mac App Store


      DOWNLOAD –––––>>> https://ssurll.com/2uzxh6



      -

      For Mac apps, the primary category you select is also the category in which the app appears when users browse the Mac App Store or filter search results, and it determines placement in the Categories tab on the Mac App Store in macOS 10.14 or later.

      -

      It could be argued that it is useful for antivirus software to collect certain limited browsing history leading up to the detection and blocking of malware or a malicious webpage. But it is very hard to justify exfiltrating the entire browsing history of all installed browsers, regardless of whether the user has encountered malware or not. In addition, there was nothing in the app to inform the user about this data collection, and there was no way to opt out of it. Dr. Cleaner: Unfortunately, other apps by the same developer also collect this data. We observed the same data being collected by Dr. Cleaner, minus the list of installed applications. There is really no good reason for a "cleaning" app to be collecting this kind of user data, even if users were informed, which was not the case.

      -

      Brave is available as a fast, free, secure web browser for your mobile devices. Complete with a built-in ad blocker that prevents tracking, and optimized for mobile data and battery life savings. Get the Brave Browser (mobile) for Android or iOS.

      -

      Note that the search above returned hundreds of hits. If you had to scan that many results in a flat list, it would be hard to browse. With the new hierarchical view, however, you can go straight to the results in the folders that interest you.
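
      As a rough illustration of the idea (not the application's actual implementation), a flat list of result paths can be grouped by their parent folders so that hits are browsed folder by folder:

```python
from collections import defaultdict
from pathlib import PurePath

def group_by_folder(result_paths):
    """Group a flat list of file paths by their parent folder."""
    grouped = defaultdict(list)
    for p in result_paths:
        path = PurePath(p)
        grouped[str(path.parent)].append(path.name)
    return dict(grouped)

hits = [
    "/Users/me/Documents/report.pdf",
    "/Users/me/Documents/notes.txt",
    "/Users/me/Pictures/holiday/beach.jpg",
]
for folder, names in group_by_folder(hits).items():
    print(folder)
    for name in names:
        print("   ", name)
```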

      -

      If you primarily search media files such as videos, photos, or audio files, NeoFinder might be the right tool for you. It permanently catalogues your files, even those that may be stored on other disks or FTP servers that are not always attached, and keeps even detailed information about your image properties (EXIF data etc.), thumbnails, XMP data, and more.

      -

      In-app purchase keys are private and can only be downloaded once. After downloading the key, store it in a safe place. Remember to revoke a key immediately if it becomes lost or compromised and ensure that offers using this key are updated.

      -

      -

      In the dialog that appears, choose Limited Access or Full Access. If you select Full Access, all users will have access to the app. Users with the Admin, Finance, and Access to Reports roles will always have access to all apps. Then click Restore.

      -

      If your iOS app offers universal purchase and has an existing macOS platform, the option to offer the iOS app on the Mac App Store will not be available. Additionally, if your iOS app is available on the Mac App Store by using this option and you later add a macOS app by adding the macOS platform in App Store Connect, releasing it to the store will replace the iOS app on the Mac App Store. All existing users for the iOS app will be updated to the macOS app.

      -

      If you create a new version of an app you removed from the App Store and it gets approved by App Review, the app status remains as Developer Removed from Sale. To release the new version, you must restore it to the App Store first.

      -

      Auto-renewable subscriptions can be priced by App Store country or region (also known as storefronts.) You can choose from 800 price points in each currency, with the option for the Account Holder to submit a request for additional higher price points.

      -

      App Store Connect provides comparable prices for all 175 App Store countries and regions, taking into account taxes and foreign exchange rates. If you want to set different prices for specific storefronts, make the changes and click Next.

      -

      International equivalents for prices not in USD are based on current exchange rates with the specific thresholds which are subject to change based on changes in taxes or foreign exchange rates. View a list of thresholds per storefront.

      -

      You can change the account region of a test account to any of the 175 App Store regions. This allows you to test on different storefronts using the same Sandbox Apple ID, without having to create new testers.

      -

      I take your privacy very seriously. Reeder does not collect any data. All sensitive data (passwords and other account data) is stored securely in the Keychain on your device or in iCloud (if you have enabled iCloud Keychain).

      -

      We pride ourselves in the security of our product and the flexibility it provides you. You can set a startup password, lock the My Private Files area, and set the level of protection you need. Passwords are stored securely in the iOS KeyChain, but you can choose to enter them each time you connect.

      -

      Secure access to your computer (Mac or PC) while you are away from home. Use our simple steps to install ZeroTier VPN for secure, encrypted access from your iPad / iPhone. With FileBrowser's efficient network access you can even browse folders and view files over a cellular connection.

      -

      OS X automatically creates new "default" files for some items when they are cleaned from the system, including Finder Recent Items, Safari Cookies, etc. These newly created files are simply empty or template files used by the operating system to store data going forward. This is normal behavior for the operating system, and does not indicate any problems in cleaning the files.

      -

      The guide above covered disabling private browsing in Safari. Keep in mind, though, that this will not prevent the person in question from using incognito mode or private browsing in any other browser installed on the Mac, such as Chrome or Firefox.

      -
      -
      \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Cdroller License Key Free Crack How to Restore Your Disc Data in Minutes.md b/spaces/contluForse/HuggingGPT/assets/Cdroller License Key Free Crack How to Restore Your Disc Data in Minutes.md deleted file mode 100644 index 87ddcf891b13ea72cf94379c15bd3c346a298a63..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Cdroller License Key Free Crack How to Restore Your Disc Data in Minutes.md +++ /dev/null @@ -1,6 +0,0 @@ -
      -

      2. Keep your computer protected - a cracked CDRoller hides security risks.
      Piracy and security threats go hand in hand, usually because they come from the same people. When using a cracked copy of CDRoller, you risk being infected with a virus, Trojan horse, malware, spyware, or other unpleasant surprises hidden in the keygen or in the software itself. Usually, the problems caused by malicious code will cost you much more than the money you would pay for a CDRoller license.

      -

      Cdroller License Key Free Crack


      DOWNLOAD ►►►►► https://ssurll.com/2uzxBH



      -

      Using a warez version, crack, warez passwords, patches, serial numbers, registration codes, key generator, pirate key, keymaker or keygen for the CDRoller 11.40.60.0 license key is illegal and prevents future development of CDRoller 11.40.60.0. Download links are directly from our mirrors or the publisher's website. CDRoller 11.40.60.0 torrent files or shared files from free file sharing and free upload services, including CDRoller 11.40.60.0 Rapidshare, MegaUpload, HellShare, HotFile, FileServe, YouSendIt, SendSpace, DepositFiles, Letitbit, MailBigFile, DropSend, MediaMax, LeapFile, zUpload, MyOtherDrive, DivShare or MediaFire, are not allowed!

      -
      -
      \ No newline at end of file diff --git a/spaces/coraKong/WorldSimulation/plugins/DisasterPlugin.py b/spaces/coraKong/WorldSimulation/plugins/DisasterPlugin.py deleted file mode 100644 index 5ad5863b855e2a0a4428b23e66fddd1822f630cd..0000000000000000000000000000000000000000 --- a/spaces/coraKong/WorldSimulation/plugins/DisasterPlugin.py +++ /dev/null @@ -1,18 +0,0 @@ -import random - -class DisasterPlugin: - def __init__(self, disaster_frequency, disaster_prob): - self.disaster_frequency = disaster_frequency - self.disaster_prob = disaster_prob - - def trigger_disaster(self, characters, character_die_callback): - print("发生自然灾难...") - num_killed = int(len(characters) * self.disaster_prob) - killed = random.sample(characters, num_killed) - for c in killed: - c.history.append(f"{c.real_age}岁,死于自然灾难") - character_die_callback(c) - - def execute(self, characters, round_num, character_die_callback): - if round_num % self.disaster_frequency == 0 and random.random() < self.disaster_prob: - self.trigger_disaster(characters, character_die_callback) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/dist_utils.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/dist_utils.py deleted file mode 100644 index d3a1ef3fda5ceeb31bf15a73779da1b1903ab0fe..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/dist_utils.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import functools -import os -import subprocess -from collections import OrderedDict - -import torch -import torch.multiprocessing as mp -from torch import distributed as dist -from torch._utils import (_flatten_dense_tensors, _take_tensors, - _unflatten_dense_tensors) - - -def init_dist(launcher, backend='nccl', **kwargs): - if mp.get_start_method(allow_none=True) is None: - mp.set_start_method('spawn') - if launcher == 'pytorch': - _init_dist_pytorch(backend, **kwargs) - elif launcher == 'mpi': - _init_dist_mpi(backend, **kwargs) - elif launcher == 'slurm': - _init_dist_slurm(backend, **kwargs) - else: - raise ValueError(f'Invalid launcher type: {launcher}') - - -def _init_dist_pytorch(backend, **kwargs): - # TODO: use local_rank instead of rank % num_gpus - rank = int(os.environ['RANK']) - num_gpus = torch.cuda.device_count() - torch.cuda.set_device(rank % num_gpus) - dist.init_process_group(backend=backend, **kwargs) - - -def _init_dist_mpi(backend, **kwargs): - # TODO: use local_rank instead of rank % num_gpus - rank = int(os.environ['OMPI_COMM_WORLD_RANK']) - num_gpus = torch.cuda.device_count() - torch.cuda.set_device(rank % num_gpus) - dist.init_process_group(backend=backend, **kwargs) - - -def _init_dist_slurm(backend, port=None): - """Initialize slurm distributed training environment. - - If argument ``port`` is not specified, then the master port will be system - environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system - environment variable, then a default port ``29500`` will be used. - - Args: - backend (str): Backend of torch.distributed. - port (int, optional): Master port. Defaults to None. 
- """ - proc_id = int(os.environ['SLURM_PROCID']) - ntasks = int(os.environ['SLURM_NTASKS']) - node_list = os.environ['SLURM_NODELIST'] - num_gpus = torch.cuda.device_count() - torch.cuda.set_device(proc_id % num_gpus) - addr = subprocess.getoutput( - f'scontrol show hostname {node_list} | head -n1') - # specify master port - if port is not None: - os.environ['MASTER_PORT'] = str(port) - elif 'MASTER_PORT' in os.environ: - pass # use MASTER_PORT in the environment variable - else: - # 29500 is torch.distributed default port - os.environ['MASTER_PORT'] = '29500' - # use MASTER_ADDR in the environment variable if it already exists - if 'MASTER_ADDR' not in os.environ: - os.environ['MASTER_ADDR'] = addr - os.environ['WORLD_SIZE'] = str(ntasks) - os.environ['LOCAL_RANK'] = str(proc_id % num_gpus) - os.environ['RANK'] = str(proc_id) - dist.init_process_group(backend=backend) - - -def get_dist_info(): - if dist.is_available() and dist.is_initialized(): - rank = dist.get_rank() - world_size = dist.get_world_size() - else: - rank = 0 - world_size = 1 - return rank, world_size - - -def master_only(func): - - @functools.wraps(func) - def wrapper(*args, **kwargs): - rank, _ = get_dist_info() - if rank == 0: - return func(*args, **kwargs) - - return wrapper - - -def allreduce_params(params, coalesce=True, bucket_size_mb=-1): - """Allreduce parameters. - - Args: - params (list[torch.Parameters]): List of parameters or buffers of a - model. - coalesce (bool, optional): Whether allreduce parameters as a whole. - Defaults to True. - bucket_size_mb (int, optional): Size of bucket, the unit is MB. - Defaults to -1. - """ - _, world_size = get_dist_info() - if world_size == 1: - return - params = [param.data for param in params] - if coalesce: - _allreduce_coalesced(params, world_size, bucket_size_mb) - else: - for tensor in params: - dist.all_reduce(tensor.div_(world_size)) - - -def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): - """Allreduce gradients. - - Args: - params (list[torch.Parameters]): List of parameters of a model - coalesce (bool, optional): Whether allreduce parameters as a whole. - Defaults to True. - bucket_size_mb (int, optional): Size of bucket, the unit is MB. - Defaults to -1. 
- """ - grads = [ - param.grad.data for param in params - if param.requires_grad and param.grad is not None - ] - _, world_size = get_dist_info() - if world_size == 1: - return - if coalesce: - _allreduce_coalesced(grads, world_size, bucket_size_mb) - else: - for tensor in grads: - dist.all_reduce(tensor.div_(world_size)) - - -def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): - if bucket_size_mb > 0: - bucket_size_bytes = bucket_size_mb * 1024 * 1024 - buckets = _take_tensors(tensors, bucket_size_bytes) - else: - buckets = OrderedDict() - for tensor in tensors: - tp = tensor.type() - if tp not in buckets: - buckets[tp] = [] - buckets[tp].append(tensor) - buckets = buckets.values() - - for bucket in buckets: - flat_tensors = _flatten_dense_tensors(bucket) - dist.all_reduce(flat_tensors) - flat_tensors.div_(world_size) - for tensor, synced in zip( - bucket, _unflatten_dense_tensors(flat_tensors, bucket)): - tensor.copy_(synced) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/log_buffer.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/log_buffer.py deleted file mode 100644 index d949e2941c5400088c7cd8a1dc893d8b233ae785..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/log_buffer.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from collections import OrderedDict - -import numpy as np - - -class LogBuffer: - - def __init__(self): - self.val_history = OrderedDict() - self.n_history = OrderedDict() - self.output = OrderedDict() - self.ready = False - - def clear(self): - self.val_history.clear() - self.n_history.clear() - self.clear_output() - - def clear_output(self): - self.output.clear() - self.ready = False - - def update(self, vars, count=1): - assert isinstance(vars, dict) - for key, var in vars.items(): - if key not in self.val_history: - self.val_history[key] = [] - self.n_history[key] = [] - self.val_history[key].append(var) - self.n_history[key].append(count) - - def average(self, n=0): - """Average latest n values or all values.""" - assert n >= 0 - for key in self.val_history: - values = np.array(self.val_history[key][-n:]) - nums = np.array(self.n_history[key][-n:]) - avg = np.sum(values * nums) / np.sum(nums) - self.output[key] = avg - self.ready = True diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/deform_conv.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/deform_conv.py deleted file mode 100644 index 49547238bcc67775bbeb97184467c3c8eee8df60..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/deform_conv.py +++ /dev/null @@ -1,514 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import math -from functools import lru_cache -import torch -from torch import nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair -from torchvision.ops import deform_conv2d - -from annotator.oneformer.detectron2.utils.develop import create_dummy_class, create_dummy_func - -from .wrappers import _NewEmptyTensorOp - - -class _DeformConv(Function): - @staticmethod - def forward( - ctx, - input, - offset, - weight, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - im2col_step=64, - ): - if input is not None and input.dim() != 4: - raise ValueError( - "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim()) - ) - ctx.stride = _pair(stride) - ctx.padding = _pair(padding) - ctx.dilation = _pair(dilation) - ctx.groups = groups - ctx.deformable_groups = deformable_groups - ctx.im2col_step = im2col_step - - ctx.save_for_backward(input, offset, weight) - - output = input.new_empty( - _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride) - ) - - ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones - - if not input.is_cuda: - # TODO: let torchvision support full features of our deformconv. - if deformable_groups != 1: - raise NotImplementedError( - "Deformable Conv with deformable_groups != 1 is not supported on CPUs!" - ) - return deform_conv2d( - input, offset, weight, stride=stride, padding=padding, dilation=dilation - ) - else: - cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) - assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" - - _C.deform_conv_forward( - input, - weight, - offset, - output, - ctx.bufs_[0], - ctx.bufs_[1], - weight.size(3), - weight.size(2), - ctx.stride[1], - ctx.stride[0], - ctx.padding[1], - ctx.padding[0], - ctx.dilation[1], - ctx.dilation[0], - ctx.groups, - ctx.deformable_groups, - cur_im2col_step, - ) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, offset, weight = ctx.saved_tensors - - grad_input = grad_offset = grad_weight = None - - if not grad_output.is_cuda: - raise NotImplementedError("Deformable Conv is not supported on CPUs!") - else: - cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step) - assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize" - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]: - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - _C.deform_conv_backward_input( - input, - offset, - grad_output, - grad_input, - grad_offset, - weight, - ctx.bufs_[0], - weight.size(3), - weight.size(2), - ctx.stride[1], - ctx.stride[0], - ctx.padding[1], - ctx.padding[0], - ctx.dilation[1], - ctx.dilation[0], - ctx.groups, - ctx.deformable_groups, - cur_im2col_step, - ) - - if ctx.needs_input_grad[2]: - grad_weight = torch.zeros_like(weight) - _C.deform_conv_backward_filter( - input, - offset, - grad_output, - grad_weight, - ctx.bufs_[0], - ctx.bufs_[1], - weight.size(3), - weight.size(2), - ctx.stride[1], - ctx.stride[0], - ctx.padding[1], - ctx.padding[0], - ctx.dilation[1], - ctx.dilation[0], - ctx.groups, - ctx.deformable_groups, - 1, - cur_im2col_step, - ) - - return grad_input, grad_offset, grad_weight, None, None, None, None, None, None - - @staticmethod - def _output_size(input, weight, padding, dilation, stride): - channels = weight.size(0) - output_size = (input.size(0), channels) - 
for d in range(input.dim() - 2): - in_size = input.size(d + 2) - pad = padding[d] - kernel = dilation[d] * (weight.size(d + 2) - 1) + 1 - stride_ = stride[d] - output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,) - if not all(map(lambda s: s > 0, output_size)): - raise ValueError( - "convolution input is too small (output would be {})".format( - "x".join(map(str, output_size)) - ) - ) - return output_size - - @staticmethod - @lru_cache(maxsize=128) - def _cal_im2col_step(input_size, default_size): - """ - Calculate proper im2col step size, which should be divisible by input_size and not larger - than prefer_size. Meanwhile the step size should be as large as possible to be more - efficient. So we choose the largest one among all divisors of input_size which are smaller - than prefer_size. - :param input_size: input batch size . - :param default_size: default preferred im2col step size. - :return: the largest proper step size. - """ - if input_size <= default_size: - return input_size - best_step = 1 - for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)): - if input_size % step == 0: - if input_size // step <= default_size: - return input_size // step - best_step = step - - return best_step - - -class _ModulatedDeformConv(Function): - @staticmethod - def forward( - ctx, - input, - offset, - mask, - weight, - bias=None, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - ): - ctx.stride = stride - ctx.padding = padding - ctx.dilation = dilation - ctx.groups = groups - ctx.deformable_groups = deformable_groups - ctx.with_bias = bias is not None - if not ctx.with_bias: - bias = input.new_empty(1) # fake tensor - if not input.is_cuda: - raise NotImplementedError("Deformable Conv is not supported on CPUs!") - if ( - weight.requires_grad - or mask.requires_grad - or offset.requires_grad - or input.requires_grad - ): - ctx.save_for_backward(input, offset, mask, weight, bias) - output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight)) - ctx._bufs = [input.new_empty(0), input.new_empty(0)] - _C.modulated_deform_conv_forward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, - output, - ctx._bufs[1], - weight.shape[2], - weight.shape[3], - ctx.stride, - ctx.stride, - ctx.padding, - ctx.padding, - ctx.dilation, - ctx.dilation, - ctx.groups, - ctx.deformable_groups, - ctx.with_bias, - ) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - if not grad_output.is_cuda: - raise NotImplementedError("Deformable Conv is not supported on CPUs!") - input, offset, mask, weight, bias = ctx.saved_tensors - grad_input = torch.zeros_like(input) - grad_offset = torch.zeros_like(offset) - grad_mask = torch.zeros_like(mask) - grad_weight = torch.zeros_like(weight) - grad_bias = torch.zeros_like(bias) - _C.modulated_deform_conv_backward( - input, - weight, - bias, - ctx._bufs[0], - offset, - mask, - ctx._bufs[1], - grad_input, - grad_weight, - grad_bias, - grad_offset, - grad_mask, - grad_output, - weight.shape[2], - weight.shape[3], - ctx.stride, - ctx.stride, - ctx.padding, - ctx.padding, - ctx.dilation, - ctx.dilation, - ctx.groups, - ctx.deformable_groups, - ctx.with_bias, - ) - if not ctx.with_bias: - grad_bias = None - - return ( - grad_input, - grad_offset, - grad_mask, - grad_weight, - grad_bias, - None, - None, - None, - None, - None, - ) - - @staticmethod - def _infer_shape(ctx, input, weight): - n = input.size(0) - channels_out = weight.size(0) - height, width = input.shape[2:4] - 
kernel_h, kernel_w = weight.shape[2:4] - height_out = ( - height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1) - ) // ctx.stride + 1 - width_out = ( - width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1) - ) // ctx.stride + 1 - return n, channels_out, height_out, width_out - - -deform_conv = _DeformConv.apply -modulated_deform_conv = _ModulatedDeformConv.apply - - -class DeformConv(nn.Module): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - bias=False, - norm=None, - activation=None, - ): - """ - Deformable convolution from :paper:`deformconv`. - - Arguments are similar to :class:`Conv2D`. Extra arguments: - - Args: - deformable_groups (int): number of groups used in deformable convolution. - norm (nn.Module, optional): a normalization layer - activation (callable(Tensor) -> Tensor): a callable activation function - """ - super(DeformConv, self).__init__() - - assert not bias - assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format( - in_channels, groups - ) - assert ( - out_channels % groups == 0 - ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups) - - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride) - self.padding = _pair(padding) - self.dilation = _pair(dilation) - self.groups = groups - self.deformable_groups = deformable_groups - self.norm = norm - self.activation = activation - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size) - ) - self.bias = None - - nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") - - def forward(self, x, offset): - if x.numel() == 0: - # When input is empty, we want to return a empty tensor with "correct" shape, - # So that the following operations will not panic - # if they check for the shape of the tensor. - # This computes the height and width of the output tensor - output_shape = [ - (i + 2 * p - (di * (k - 1) + 1)) // s + 1 - for i, p, di, k, s in zip( - x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride - ) - ] - output_shape = [x.shape[0], self.weight.shape[0]] + output_shape - return _NewEmptyTensorOp.apply(x, output_shape) - - x = deform_conv( - x, - offset, - self.weight, - self.stride, - self.padding, - self.dilation, - self.groups, - self.deformable_groups, - ) - if self.norm is not None: - x = self.norm(x) - if self.activation is not None: - x = self.activation(x) - return x - - def extra_repr(self): - tmpstr = "in_channels=" + str(self.in_channels) - tmpstr += ", out_channels=" + str(self.out_channels) - tmpstr += ", kernel_size=" + str(self.kernel_size) - tmpstr += ", stride=" + str(self.stride) - tmpstr += ", padding=" + str(self.padding) - tmpstr += ", dilation=" + str(self.dilation) - tmpstr += ", groups=" + str(self.groups) - tmpstr += ", deformable_groups=" + str(self.deformable_groups) - tmpstr += ", bias=False" - return tmpstr - - -class ModulatedDeformConv(nn.Module): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - deformable_groups=1, - bias=True, - norm=None, - activation=None, - ): - """ - Modulated deformable convolution from :paper:`deformconv2`. - - Arguments are similar to :class:`Conv2D`. Extra arguments: - - Args: - deformable_groups (int): number of groups used in deformable convolution. 
- norm (nn.Module, optional): a normalization layer - activation (callable(Tensor) -> Tensor): a callable activation function - """ - super(ModulatedDeformConv, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = _pair(kernel_size) - self.stride = stride - self.padding = padding - self.dilation = dilation - self.groups = groups - self.deformable_groups = deformable_groups - self.with_bias = bias - self.norm = norm - self.activation = activation - - self.weight = nn.Parameter( - torch.Tensor(out_channels, in_channels // groups, *self.kernel_size) - ) - if bias: - self.bias = nn.Parameter(torch.Tensor(out_channels)) - else: - self.bias = None - - nn.init.kaiming_uniform_(self.weight, nonlinearity="relu") - if self.bias is not None: - nn.init.constant_(self.bias, 0) - - def forward(self, x, offset, mask): - if x.numel() == 0: - output_shape = [ - (i + 2 * p - (di * (k - 1) + 1)) // s + 1 - for i, p, di, k, s in zip( - x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride - ) - ] - output_shape = [x.shape[0], self.weight.shape[0]] + output_shape - return _NewEmptyTensorOp.apply(x, output_shape) - - x = modulated_deform_conv( - x, - offset, - mask, - self.weight, - self.bias, - self.stride, - self.padding, - self.dilation, - self.groups, - self.deformable_groups, - ) - if self.norm is not None: - x = self.norm(x) - if self.activation is not None: - x = self.activation(x) - return x - - def extra_repr(self): - tmpstr = "in_channels=" + str(self.in_channels) - tmpstr += ", out_channels=" + str(self.out_channels) - tmpstr += ", kernel_size=" + str(self.kernel_size) - tmpstr += ", stride=" + str(self.stride) - tmpstr += ", padding=" + str(self.padding) - tmpstr += ", dilation=" + str(self.dilation) - tmpstr += ", groups=" + str(self.groups) - tmpstr += ", deformable_groups=" + str(self.deformable_groups) - tmpstr += ", bias=" + str(self.with_bias) - return tmpstr - - -try: - from annotator.oneformer.detectron2 import _C -except ImportError: - # TODO: register ops natively so there is no need to import _C. - _msg = "detectron2 is not compiled successfully, please build following the instructions!" 
- _args = ("detectron2._C", _msg) - DeformConv = create_dummy_class("DeformConv", *_args) - ModulatedDeformConv = create_dummy_class("ModulatedDeformConv", *_args) - deform_conv = create_dummy_func("deform_conv", *_args) - modulated_deform_conv = create_dummy_func("modulated_deform_conv", *_args) diff --git a/spaces/crylake/img2poem/query2labels/q2l_infer.py b/spaces/crylake/img2poem/query2labels/q2l_infer.py deleted file mode 100644 index 214ddfa6d0563dc8ae9cb8114a81af5e02d1468c..0000000000000000000000000000000000000000 --- a/spaces/crylake/img2poem/query2labels/q2l_infer.py +++ /dev/null @@ -1,378 +0,0 @@ -import argparse -import os, sys -import random -import datetime -import time -from typing import List -import json -import numpy as np - -import torch -import torch.nn as nn -import torch.nn.parallel -import torch.backends.cudnn as cudnn -import torch.distributed as dist -import torch.optim -import torch.utils.data -import torch.utils.data.distributed - -import _init_paths -from dataset.get_dataset import get_datasets - - -from utils.logger import setup_logger -import models -import models.aslloss -from models.query2label import build_q2l -from utils.metric import voc_mAP -from utils.misc import clean_state_dict -from utils.slconfig import get_raw_dict - - -def parser_args(): - available_models = ['Q2L-R101-448', 'Q2L-R101-576', 'Q2L-TResL-448', 'Q2L-TResL_22k-448', 'Q2L-SwinL-384', 'Q2L-CvT_w24-384'] - - parser = argparse.ArgumentParser(description='Query2Label for multilabel classification') - parser.add_argument('--dataname', help='dataname', default='coco14', choices=['coco14']) - parser.add_argument('--dataset_dir', help='dir of dataset', default='/comp_robot/liushilong/data/COCO14/') - - parser.add_argument('--img_size', default=448, type=int, - help='image size. default(448)') - parser.add_argument('-a', '--arch', metavar='ARCH', default='Q2L-R101-448', - choices=available_models, - help='model architecture: ' + - ' | '.join(available_models) + - ' (default: Q2L-R101-448)') - parser.add_argument('--config', type=str, help='config file') - - parser.add_argument('--output', metavar='DIR', - help='path to output folder') - parser.add_argument('--loss', metavar='LOSS', default='asl', - choices=['asl'], - help='loss functin') - parser.add_argument('--num_class', default=80, type=int, - help="Number of classes.") - parser.add_argument('-j', '--workers', default=8, type=int, metavar='N', - help='number of data loading workers (default: 8)') - parser.add_argument('-b', '--batch-size', default=16, type=int, - metavar='N', - help='mini-batch size (default: 16), this is the total ' - 'batch size of all GPUs') - parser.add_argument('-p', '--print-freq', default=10, type=int, - metavar='N', help='print frequency (default: 10)') - parser.add_argument('--resume', type=str, metavar='PATH', - help='path to latest checkpoint (default: none)') - - parser.add_argument('--pretrained', dest='pretrained', action='store_true', - help='use pre-trained model. default is False. 
') - - parser.add_argument('--eps', default=1e-5, type=float, - help='eps for focal loss (default: 1e-5)') - - # distribution training - parser.add_argument('--world-size', default=-1, type=int, - help='number of nodes for distributed training') - parser.add_argument('--rank', default=-1, type=int, - help='node rank for distributed training') - parser.add_argument('--dist-url', default='tcp://127.0.0.1:3451', type=str, - help='url used to set up distributed training') - parser.add_argument('--seed', default=None, type=int, - help='seed for initializing training. ') - parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel') - parser.add_argument('--amp', action='store_true', - help='use mixture precision.') - # data aug - parser.add_argument('--orid_norm', action='store_true', default=False, - help='using oridinary norm of [0,0,0] and [1,1,1] for mean and std.') - - - # * Transformer - parser.add_argument('--enc_layers', default=1, type=int, - help="Number of encoding layers in the transformer") - parser.add_argument('--dec_layers', default=2, type=int, - help="Number of decoding layers in the transformer") - parser.add_argument('--dim_feedforward', default=256, type=int, - help="Intermediate size of the feedforward layers in the transformer blocks") - parser.add_argument('--hidden_dim', default=128, type=int, - help="Size of the embeddings (dimension of the transformer)") - parser.add_argument('--dropout', default=0.1, type=float, - help="Dropout applied in the transformer") - parser.add_argument('--nheads', default=4, type=int, - help="Number of attention heads inside the transformer's attentions") - parser.add_argument('--pre_norm', action='store_true') - parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine'), - help="Type of positional embedding to use on top of the image features") - parser.add_argument('--backbone', default='resnet101', type=str, - help="Name of the convolutional backbone to use") - parser.add_argument('--keep_other_self_attn_dec', action='store_true', - help='keep the other self attention modules in transformer decoders, which will be removed default.') - parser.add_argument('--keep_first_self_attn_dec', action='store_true', - help='keep the first self attention module in transformer decoders, which will be removed default.') - parser.add_argument('--keep_input_proj', action='store_true', - help="keep the input projection layer. Needed when the channel of image features is different from hidden_dim of Transformer layers.") - args = parser.parse_args() - - # update parameters with pre-defined config file - if args.config: - with open(args.config, 'r') as f: - cfg_dict = json.load(f) - for k,v in cfg_dict.items(): - setattr(args, k, v) - - return args - -def get_args(): - args = parser_args() - return args - - -best_mAP = 0 - -def main(): - args = get_args() - - if 'WORLD_SIZE' in os.environ: - assert args.world_size > 0, 'please set --world-size and --rank in the command line' - # launch by torch.distributed.launch - # Single node - # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ... - local_world_size = int(os.environ['WORLD_SIZE']) - args.world_size = args.world_size * local_world_size - args.rank = args.rank * local_world_size + args.local_rank - print('world size: {}, world rank: {}, local rank: {}'.format(args.world_size, args.rank, args.local_rank)) - print('os.environ:', os.environ) - else: - # single process, useful for debugging - # python main.py ... 
- args.world_size = 1 - args.rank = 0 - args.local_rank = 0 - - if args.seed is not None: - random.seed(args.seed) - torch.manual_seed(args.seed) - np.random.seed(args.seed) - - torch.cuda.set_device(args.local_rank) - print('| distributed init (local_rank {}): {}'.format( - args.local_rank, args.dist_url), flush=True) - torch.distributed.init_process_group(backend='nccl', init_method=args.dist_url, - world_size=args.world_size, rank=args.rank) - cudnn.benchmark = True - - # set output dir and logger - if not args.output: - args.output = (f"logs/{args.arch}-{datetime.datetime.now()}").replace(' ', '-') - os.makedirs(args.output, exist_ok=True) - logger = setup_logger(output=args.output, distributed_rank=dist.get_rank(), color=False, name="Q2L") - logger.info("Command: "+' '.join(sys.argv)) - - - # save config to outputdir - if dist.get_rank() == 0: - path = os.path.join(args.output, "config.json") - with open(path, 'w') as f: - json.dump(get_raw_dict(args), f, indent=2) - logger.info("Full config saved to {}".format(path)) - - logger.info('world size: {}'.format(dist.get_world_size())) - logger.info('dist.get_rank(): {}'.format(dist.get_rank())) - logger.info('local_rank: {}'.format(args.local_rank)) - - return main_worker(args, logger) - -def main_worker(args, logger): - global best_mAP - - # build model - model = build_q2l(args) - model = model.cuda() - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False) - criterion = models.aslloss.AsymmetricLossOptimized( - gamma_neg=args.gamma_neg, gamma_pos=args.gamma_pos, - disable_torch_grad_focal_loss=True, - eps=args.eps, - ) - - - # optionally resume from a checkpoint - if args.resume: - if os.path.isfile(args.resume): - logger.info("=> loading checkpoint '{}'".format(args.resume)) - checkpoint = torch.load(args.resume, map_location=torch.device(dist.get_rank())) - state_dict = clean_state_dict(checkpoint['state_dict']) - model.module.load_state_dict(state_dict, strict=True) - del checkpoint - del state_dict - torch.cuda.empty_cache() - else: - logger.info("=> no checkpoint found at '{}'".format(args.resume)) - - # Data loading code - _, val_dataset = get_datasets(args) - assert args.batch_size // dist.get_world_size() == args.batch_size / dist.get_world_size(), 'Batch size is not divisible by num of gpus.' 
- val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False) - val_loader = torch.utils.data.DataLoader( - val_dataset, batch_size=args.batch_size // dist.get_world_size(), shuffle=False, - num_workers=args.workers, pin_memory=True, sampler=val_sampler) - - - # for eval only - _, mAP = validate(val_loader, model, criterion, args, logger) - logger.info(' * mAP {mAP:.1f}' - .format(mAP=mAP)) - return - - - -@torch.no_grad() -def validate(val_loader, model, criterion, args, logger): - batch_time = AverageMeter('Time', ':5.3f') - losses = AverageMeter('Loss', ':5.3f') - mem = AverageMeter('Mem', ':.0f', val_only=True) - - progress = ProgressMeter( - len(val_loader), - [batch_time, losses, mem], - prefix='Test: ') - - # switch to evaluate mode - model.eval() - saved_data = [] - with torch.no_grad(): - end = time.time() - for i, (images, target) in enumerate(val_loader): - images = images.cuda(non_blocking=True) - target = target.cuda(non_blocking=True) - - # compute output - with torch.cuda.amp.autocast(enabled=args.amp): - output = model(images) - loss = criterion(output, target) - output_sm = nn.functional.sigmoid(output) - - # record loss - losses.update(loss.item(), images.size(0)) - mem.update(torch.cuda.max_memory_allocated() / 1024.0 / 1024.0) - - # save some data - _item = torch.cat((output_sm.detach().cpu(), target.detach().cpu()), 1) - saved_data.append(_item) - - # measure elapsed time - batch_time.update(time.time() - end) - end = time.time() - - if i % args.print_freq == 0 and dist.get_rank() == 0: - progress.display(i, logger) - - logger.info('=> synchronize...') - if dist.get_world_size() > 1: - dist.barrier() - loss_avg, = map( - _meter_reduce if dist.get_world_size() > 1 else lambda x: x.avg, - [losses] - ) - - # calculate mAP - saved_data = torch.cat(saved_data, 0).numpy() - saved_name = 'saved_data_tmp.{}.txt'.format(dist.get_rank()) - np.savetxt(os.path.join(args.output, saved_name), saved_data) - if dist.get_world_size() > 1: - dist.barrier() - - if dist.get_rank() == 0: - print("Calculating mAP:") - filenamelist = ['saved_data_tmp.{}.txt'.format(ii) for ii in range(dist.get_world_size())] - metric_func = voc_mAP - mAP, aps = metric_func([os.path.join(args.output, _filename) for _filename in filenamelist], args.num_class, return_each=True) - - logger.info(" mAP: {}".format(mAP)) - logger.info(" aps: {}".format(np.array2string(aps, precision=5))) - else: - mAP = 0 - - if dist.get_world_size() > 1: - dist.barrier() - - return loss_avg, mAP - - -################################################################################## - -def _meter_reduce(meter): - meter_sum = torch.FloatTensor([meter.sum]).cuda() - meter_count = torch.FloatTensor([meter.count]).cuda() - torch.distributed.reduce(meter_sum, 0) - torch.distributed.reduce(meter_count, 0) - meter_avg = meter_sum / meter_count - - return meter_avg.item() - - -def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'): - # torch.save(state, filename) - if is_best: - torch.save(state, os.path.split(filename)[0] + '/model_best.pth.tar') - # shutil.copyfile(filename, os.path.split(filename)[0] + '/model_best.pth.tar') - - -class AverageMeter(object): - """Computes and stores the average and current value""" - def __init__(self, name, fmt=':f', val_only=False): - self.name = name - self.fmt = fmt - self.val_only = val_only - self.reset() - - def reset(self): - self.val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 - - def update(self, val, n=1): - self.val = val - self.sum += val * n 
- self.count += n - self.avg = self.sum / self.count - - def __str__(self): - if self.val_only: - fmtstr = '{name} {val' + self.fmt + '}' - else: - fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' - return fmtstr.format(**self.__dict__) - - -class ProgressMeter(object): - def __init__(self, num_batches, meters, prefix=""): - self.batch_fmtstr = self._get_batch_fmtstr(num_batches) - self.meters = meters - self.prefix = prefix - - def display(self, batch, logger): - entries = [self.prefix + self.batch_fmtstr.format(batch)] - entries += [str(meter) for meter in self.meters] - logger.info(' '.join(entries)) - - def _get_batch_fmtstr(self, num_batches): - num_digits = len(str(num_batches // 1)) - fmt = '{:' + str(num_digits) + 'd}' - return '[' + fmt + '/' + fmt.format(num_batches) + ']' - - -def kill_process(filename:str, holdpid:int) -> List[str]: - # used for training only. - import subprocess, signal - res = subprocess.check_output("ps aux | grep {} | grep -v grep | awk '{{print $2}}'".format(filename), shell=True, cwd="./") - res = res.decode('utf-8') - idlist = [i.strip() for i in res.split('\n') if i != ''] - print("kill: {}".format(idlist)) - for idname in idlist: - if idname != str(holdpid): - os.kill(int(idname), signal.SIGKILL) - return idlist - -if __name__ == '__main__': - main() diff --git a/spaces/cvlab/zero123-live/ldm/data/nerf_like.py b/spaces/cvlab/zero123-live/ldm/data/nerf_like.py deleted file mode 100644 index 84ef18288db005c72d3b5832144a7bd5cfffe9b2..0000000000000000000000000000000000000000 --- a/spaces/cvlab/zero123-live/ldm/data/nerf_like.py +++ /dev/null @@ -1,165 +0,0 @@ -from torch.utils.data import Dataset -import os -import json -import numpy as np -import torch -import imageio -import math -import cv2 -from torchvision import transforms - -def cartesian_to_spherical(xyz): - ptsnew = np.hstack((xyz, np.zeros(xyz.shape))) - xy = xyz[:,0]**2 + xyz[:,1]**2 - z = np.sqrt(xy + xyz[:,2]**2) - theta = np.arctan2(np.sqrt(xy), xyz[:,2]) # for elevation angle defined from Z-axis down - #ptsnew[:,4] = np.arctan2(xyz[:,2], np.sqrt(xy)) # for elevation angle defined from XY-plane up - azimuth = np.arctan2(xyz[:,1], xyz[:,0]) - return np.array([theta, azimuth, z]) - - -def get_T(T_target, T_cond): - theta_cond, azimuth_cond, z_cond = cartesian_to_spherical(T_cond[None, :]) - theta_target, azimuth_target, z_target = cartesian_to_spherical(T_target[None, :]) - - d_theta = theta_target - theta_cond - d_azimuth = (azimuth_target - azimuth_cond) % (2 * math.pi) - d_z = z_target - z_cond - - d_T = torch.tensor([d_theta.item(), math.sin(d_azimuth.item()), math.cos(d_azimuth.item()), d_z.item()]) - return d_T - -def get_spherical(T_target, T_cond): - theta_cond, azimuth_cond, z_cond = cartesian_to_spherical(T_cond[None, :]) - theta_target, azimuth_target, z_target = cartesian_to_spherical(T_target[None, :]) - - d_theta = theta_target - theta_cond - d_azimuth = (azimuth_target - azimuth_cond) % (2 * math.pi) - d_z = z_target - z_cond - - d_T = torch.tensor([math.degrees(d_theta.item()), math.degrees(d_azimuth.item()), d_z.item()]) - return d_T - -class RTMV(Dataset): - def __init__(self, root_dir='datasets/RTMV/google_scanned',\ - first_K=64, resolution=256, load_target=False): - self.root_dir = root_dir - self.scene_list = sorted(next(os.walk(root_dir))[1]) - self.resolution = resolution - self.first_K = first_K - self.load_target = load_target - - def __len__(self): - return len(self.scene_list) - - def __getitem__(self, idx): - scene_dir = os.path.join(self.root_dir, 
self.scene_list[idx]) - with open(os.path.join(scene_dir, 'transforms.json'), "r") as f: - meta = json.load(f) - imgs = [] - poses = [] - for i_img in range(self.first_K): - meta_img = meta['frames'][i_img] - - if i_img == 0 or self.load_target: - img_path = os.path.join(scene_dir, meta_img['file_path']) - img = imageio.imread(img_path) - img = cv2.resize(img, (self.resolution, self.resolution), interpolation = cv2.INTER_LINEAR) - imgs.append(img) - - c2w = meta_img['transform_matrix'] - poses.append(c2w) - - imgs = (np.array(imgs) / 255.).astype(np.float32) # (RGBA) imgs - imgs = torch.tensor(self.blend_rgba(imgs)).permute(0, 3, 1, 2) - imgs = imgs * 2 - 1. # convert to stable diffusion range - poses = torch.tensor(np.array(poses).astype(np.float32)) - return imgs, poses - - def blend_rgba(self, img): - img = img[..., :3] * img[..., -1:] + (1. - img[..., -1:]) # blend A to RGB - return img - - -class GSO(Dataset): - def __init__(self, root_dir='datasets/GoogleScannedObjects',\ - split='val', first_K=5, resolution=256, load_target=False, name='render_mvs'): - self.root_dir = root_dir - with open(os.path.join(root_dir, '%s.json' % split), "r") as f: - self.scene_list = json.load(f) - self.resolution = resolution - self.first_K = first_K - self.load_target = load_target - self.name = name - - def __len__(self): - return len(self.scene_list) - - def __getitem__(self, idx): - scene_dir = os.path.join(self.root_dir, self.scene_list[idx]) - with open(os.path.join(scene_dir, 'transforms_%s.json' % self.name), "r") as f: - meta = json.load(f) - imgs = [] - poses = [] - for i_img in range(self.first_K): - meta_img = meta['frames'][i_img] - - if i_img == 0 or self.load_target: - img_path = os.path.join(scene_dir, meta_img['file_path']) - img = imageio.imread(img_path) - img = cv2.resize(img, (self.resolution, self.resolution), interpolation = cv2.INTER_LINEAR) - imgs.append(img) - - c2w = meta_img['transform_matrix'] - poses.append(c2w) - - imgs = (np.array(imgs) / 255.).astype(np.float32) # (RGBA) imgs - mask = imgs[:, :, :, -1] - imgs = torch.tensor(self.blend_rgba(imgs)).permute(0, 3, 1, 2) - imgs = imgs * 2 - 1. # convert to stable diffusion range - poses = torch.tensor(np.array(poses).astype(np.float32)) - return imgs, poses - - def blend_rgba(self, img): - img = img[..., :3] * img[..., -1:] + (1. - img[..., -1:]) # blend A to RGB - return img - -class WILD(Dataset): - def __init__(self, root_dir='data/nerf_wild',\ - first_K=33, resolution=256, load_target=False): - self.root_dir = root_dir - self.scene_list = sorted(next(os.walk(root_dir))[1]) - self.resolution = resolution - self.first_K = first_K - self.load_target = load_target - - def __len__(self): - return len(self.scene_list) - - def __getitem__(self, idx): - scene_dir = os.path.join(self.root_dir, self.scene_list[idx]) - with open(os.path.join(scene_dir, 'transforms_train.json'), "r") as f: - meta = json.load(f) - imgs = [] - poses = [] - for i_img in range(self.first_K): - meta_img = meta['frames'][i_img] - - if i_img == 0 or self.load_target: - img_path = os.path.join(scene_dir, meta_img['file_path']) - img = imageio.imread(img_path + '.png') - img = cv2.resize(img, (self.resolution, self.resolution), interpolation = cv2.INTER_LINEAR) - imgs.append(img) - - c2w = meta_img['transform_matrix'] - poses.append(c2w) - - imgs = (np.array(imgs) / 255.).astype(np.float32) # (RGBA) imgs - imgs = torch.tensor(self.blend_rgba(imgs)).permute(0, 3, 1, 2) - imgs = imgs * 2 - 1. 
# convert to stable diffusion range - poses = torch.tensor(np.array(poses).astype(np.float32)) - return imgs, poses - - def blend_rgba(self, img): - img = img[..., :3] * img[..., -1:] + (1. - img[..., -1:]) # blend A to RGB - return img \ No newline at end of file diff --git a/spaces/cymic/Waifu_Diffusion_Webui/javascript/imageMaskFix.js b/spaces/cymic/Waifu_Diffusion_Webui/javascript/imageMaskFix.js deleted file mode 100644 index 3d77bfe9b45c13ef602098b4bd81586767430360..0000000000000000000000000000000000000000 --- a/spaces/cymic/Waifu_Diffusion_Webui/javascript/imageMaskFix.js +++ /dev/null @@ -1,45 +0,0 @@ -/** - * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668 - * @see https://github.com/gradio-app/gradio/issues/1721 - */ -window.addEventListener( 'resize', () => imageMaskResize()); -function imageMaskResize() { - const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas'); - if ( ! canvases.length ) { - canvases_fixed = false; - window.removeEventListener( 'resize', imageMaskResize ); - return; - } - - const wrapper = canvases[0].closest('.touch-none'); - const previewImage = wrapper.previousElementSibling; - - if ( ! previewImage.complete ) { - previewImage.addEventListener( 'load', () => imageMaskResize()); - return; - } - - const w = previewImage.width; - const h = previewImage.height; - const nw = previewImage.naturalWidth; - const nh = previewImage.naturalHeight; - const portrait = nh > nw; - const factor = portrait; - - const wW = Math.min(w, portrait ? h/nh*nw : w/nw*nw); - const wH = Math.min(h, portrait ? h/nh*nh : w/nw*nh); - - wrapper.style.width = `${wW}px`; - wrapper.style.height = `${wH}px`; - wrapper.style.left = `${(w-wW)/2}px`; - wrapper.style.top = `${(h-wH)/2}px`; - - canvases.forEach( c => { - c.style.width = c.style.height = ''; - c.style.maxWidth = '100%'; - c.style.maxHeight = '100%'; - c.style.objectFit = 'contain'; - }); - } - - onUiUpdate(() => imageMaskResize()); \ No newline at end of file diff --git a/spaces/dakaiye/dky_xuexi/docs/self_analysis.md b/spaces/dakaiye/dky_xuexi/docs/self_analysis.md deleted file mode 100644 index ebc2337194974bf210794df7d858889010fecf08..0000000000000000000000000000000000000000 --- a/spaces/dakaiye/dky_xuexi/docs/self_analysis.md +++ /dev/null @@ -1,378 +0,0 @@ -# chatgpt-academic项目自译解报告 -(Author补充:以下分析均由本项目调用ChatGPT一键生成,如果有不准确的地方,全怪GPT😄) - - -| 文件名 | 功能描述 | -| ------ | ------ | -| check_proxy.py | 检查代理有效性及地理位置 | -| colorful.py | 控制台打印彩色文字 | -| config.py | 配置和参数设置 | -| config_private.py | 私人配置和参数设置 | -| core_functional.py | 核心函数和参数设置 | -| crazy_functional.py | 高级功能插件集合 | -| main.py | 一个 Chatbot 程序,提供各种学术翻译、文本处理和其他查询服务 | -| multi_language.py | 识别和翻译不同语言 | -| theme.py | 自定义 gradio 应用程序主题 | -| toolbox.py | 工具类库,用于协助实现各种功能 | -| crazy_functions\crazy_functions_test.py | 测试 crazy_functions 中的各种函数 | -| crazy_functions\crazy_utils.py | 工具函数,用于字符串处理、异常检测、Markdown 格式转换等 | -| crazy_functions\Latex全文润色.py | 对整个 Latex 项目进行润色和纠错 | -| crazy_functions\Latex全文翻译.py | 对整个 Latex 项目进行翻译 | -| crazy_functions\\_\_init\_\_.py | 模块初始化文件,标识 `crazy_functions` 是一个包 | -| crazy_functions\下载arxiv论文翻译摘要.py | 下载 `arxiv` 论文的 PDF 文件,并提取摘要和翻译 | -| crazy_functions\代码重写为全英文_多线程.py | 将Python源代码文件中的中文内容转化为英文 | -| crazy_functions\图片生成.py | 根据激励文本使用GPT模型生成相应的图像 | -| crazy_functions\对话历史存档.py | 将每次对话记录写入Markdown格式的文件中 | -| crazy_functions\总结word文档.py | 对输入的word文档进行摘要生成 | -| crazy_functions\总结音视频.py | 对输入的音视频文件进行摘要生成 | -| crazy_functions\批量Markdown翻译.py | 将指定目录下的Markdown文件进行中英文翻译 | -| 
crazy_functions\批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 | -| crazy_functions\批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 | -| crazy_functions\批量翻译PDF文档_多线程.py | 将指定目录下的PDF文件进行中英文翻译 | -| crazy_functions\理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 | -| crazy_functions\生成函数注释.py | 自动生成Python函数的注释 | -| crazy_functions\联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 | -| crazy_functions\解析JupyterNotebook.py | 对Jupyter Notebook进行代码解析 | -| crazy_functions\解析项目源代码.py | 对指定编程语言的源代码进行解析 | -| crazy_functions\询问多个大语言模型.py | 使用多个大语言模型对输入进行处理和回复 | -| crazy_functions\读文章写摘要.py | 对论文进行解析和全文摘要生成 | -| crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 | -| crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 | -| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 | -| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | -| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 | -| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | -| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | -| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | -| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 | -| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | -| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | -| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | -| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | -| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | -| request_llm\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | -| request_llm\test_llms.py | 对llm模型进行单元测试。 | - -## 接下来请你逐文件分析下面的工程[0/48] 请对下面的程序文件做一个概述: check_proxy.py - -这个文件主要包含了五个函数: - -1. `check_proxy`:用于检查代理的有效性及地理位置,输出代理配置和所在地信息。 - -2. `backup_and_download`:用于备份当前版本并下载新版本。 - -3. `patch_and_restart`:用于覆盖更新当前版本并重新启动程序。 - -4. `get_current_version`:用于获取当前程序的版本号。 - -5. `auto_update`:用于自动检查新版本并提示用户更新。如果用户选择更新,则备份并下载新版本,覆盖更新当前版本并重新启动程序。如果更新失败,则输出错误信息,并不会向用户进行任何提示。 - -还有一个没有函数名的语句`os.environ['no_proxy'] = '*'`,用于设置环境变量,避免代理网络产生意外污染。 - -此外,该文件导入了以下三个模块/函数: - -- `requests` -- `shutil` -- `os` - -## [1/48] 请对下面的程序文件做一个概述: colorful.py - -该文件是一个Python脚本,用于在控制台中打印彩色文字。该文件包含了一些函数,用于以不同颜色打印文本。其中,红色、绿色、黄色、蓝色、紫色、靛色分别以函数 print红、print绿、print黄、print蓝、print紫、print靛 的形式定义;亮红色、亮绿色、亮黄色、亮蓝色、亮紫色、亮靛色分别以 print亮红、print亮绿、print亮黄、print亮蓝、print亮紫、print亮靛 的形式定义。它们使用 ANSI Escape Code 将彩色输出从控制台突出显示。如果运行在 Linux 操作系统上,文件所执行的操作被留空;否则,该文件导入了 colorama 库并调用 init() 函数进行初始化。最后,通过一系列条件语句,该文件通过将所有彩色输出函数的名称重新赋值为 print 函数的名称来避免输出文件的颜色问题。 - -## [2/48] 请对下面的程序文件做一个概述: config.py - -这个程序文件是用来配置和参数设置的。它包含了许多设置,如API key,使用代理,线程数,默认模型,超时时间等等。此外,它还包含了一些高级功能,如URL重定向等。这些设置将会影响到程序的行为和性能。 - -## [3/48] 请对下面的程序文件做一个概述: config_private.py - -这个程序文件是一个Python脚本,文件名为config_private.py。其中包含以下变量的赋值: - -1. API_KEY:API密钥。 -2. USE_PROXY:是否应用代理。 -3. proxies:如果使用代理,则设置代理网络的协议(socks5/http)、地址(localhost)和端口(11284)。 -4. DEFAULT_WORKER_NUM:默认的工作线程数量。 -5. SLACK_CLAUDE_BOT_ID:Slack机器人ID。 -6. 
SLACK_CLAUDE_USER_TOKEN:Slack用户令牌。 - -## [4/48] 请对下面的程序文件做一个概述: core_functional.py - -这是一个名为core_functional.py的源代码文件,该文件定义了一个名为get_core_functions()的函数,该函数返回一个字典,该字典包含了各种学术翻译润色任务的说明和相关参数,如颜色、前缀、后缀等。这些任务包括英语学术润色、中文学术润色、查找语法错误、中译英、学术中英互译、英译中、找图片和参考文献转Bib。其中,一些任务还定义了预处理函数用于处理任务的输入文本。 - -## [5/48] 请对下面的程序文件做一个概述: crazy_functional.py - -此程序文件(crazy_functional.py)是一个函数插件集合,包含了多个函数插件的定义和调用。这些函数插件旨在提供一些高级功能,如解析项目源代码、批量翻译PDF文档和Latex全文润色等。其中一些插件还支持热更新功能,不需要重启程序即可生效。文件中的函数插件按照功能进行了分类(第一组和第二组),并且有不同的调用方式(作为按钮或下拉菜单)。 - -## [6/48] 请对下面的程序文件做一个概述: main.py - -这是一个Python程序文件,文件名为main.py。该程序包含一个名为main的函数,程序会自动运行该函数。程序要求已经安装了gradio、os等模块,会根据配置文件加载代理、model、API Key等信息。程序提供了Chatbot功能,实现了一个对话界面,用户可以输入问题,然后Chatbot可以回答问题或者提供相关功能。程序还包含了基础功能区、函数插件区、更换模型 & SysPrompt & 交互界面布局、备选输入区,用户可以在这些区域选择功能和插件进行使用。程序中还包含了一些辅助模块,如logging等。 - -## [7/48] 请对下面的程序文件做一个概述: multi_language.py - -该文件multi_language.py是用于将项目翻译成不同语言的程序。它包含了以下函数和变量:lru_file_cache、contains_chinese、split_list、map_to_json、read_map_from_json、advanced_split、trans、trans_json、step_1_core_key_translate、CACHE_FOLDER、blacklist、LANG、TransPrompt、cached_translation等。注释和文档字符串提供了有关程序的说明,例如如何使用该程序,如何修改“LANG”和“TransPrompt”变量等。 - -## [8/48] 请对下面的程序文件做一个概述: theme.py - -这是一个Python源代码文件,文件名为theme.py。此文件中定义了一个函数adjust_theme,其功能是自定义gradio应用程序的主题,包括调整颜色、字体、阴影等。如果允许,则添加一个看板娘。此文件还包括变量advanced_css,其中包含一些CSS样式,用于高亮显示代码和自定义聊天框样式。此文件还导入了get_conf函数和gradio库。 - -## [9/48] 请对下面的程序文件做一个概述: toolbox.py - -toolbox.py是一个工具类库,其中主要包含了一些函数装饰器和小工具函数,用于协助实现聊天机器人所需的各种功能,包括文本处理、功能插件加载、异常检测、Markdown格式转换,文件读写等等。此外,该库还包含一些依赖、参数配置等信息。该库易于理解和维护。 - -## [10/48] 请对下面的程序文件做一个概述: crazy_functions\crazy_functions_test.py - -这个文件是一个Python测试模块,用于测试crazy_functions中的各种函数插件。这些函数包括:解析Python项目源代码、解析Cpp项目源代码、Latex全文润色、Markdown中译英、批量翻译PDF文档、谷歌检索小助手、总结word文档、下载arxiv论文并翻译摘要、联网回答问题、和解析Jupyter Notebooks。对于每个函数插件,都有一个对应的测试函数来进行测试。 - -## [11/48] 请对下面的程序文件做一个概述: crazy_functions\crazy_utils.py - -这个Python文件中包括了两个函数: - -1. `input_clipping`: 该函数用于裁剪输入文本长度,使其不超过一定的限制。 -2. 
`request_gpt_model_in_new_thread_with_ui_alive`: 该函数用于请求 GPT 模型并保持用户界面的响应,支持多线程和实时更新用户界面。 - -这两个函数都依赖于从 `toolbox` 和 `request_llm` 中导入的一些工具函数。函数的输入和输出有详细的描述文档。 - -## [12/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文润色.py - -这是一个Python程序文件,文件名为crazy_functions\Latex全文润色.py。文件包含了一个PaperFileGroup类和三个函数Latex英文润色,Latex中文润色和Latex英文纠错。程序使用了字符串处理、正则表达式、文件读写、多线程等技术,主要作用是对整个Latex项目进行润色和纠错。其中润色和纠错涉及到了对文本的语法、清晰度和整体可读性等方面的提升。此外,该程序还参考了第三方库,并封装了一些工具函数。 - -## [13/48] 请对下面的程序文件做一个概述: crazy_functions\Latex全文翻译.py - -这个文件包含两个函数 `Latex英译中` 和 `Latex中译英`,它们都会对整个Latex项目进行翻译。这个文件还包含一个类 `PaperFileGroup`,它拥有一个方法 `run_file_split`,用于把长文本文件分成多个短文件。其中使用了工具库 `toolbox` 中的一些函数和从 `request_llm` 中导入了 `model_info`。接下来的函数把文件读取进来,把它们的注释删除,进行分割,并进行翻译。这个文件还包括了一些异常处理和界面更新的操作。 - -## [14/48] 请对下面的程序文件做一个概述: crazy_functions\__init__.py - -这是一个Python模块的初始化文件(__init__.py),命名为"crazy_functions"。该模块包含了一些疯狂的函数,但该文件并没有实现这些函数,而是作为一个包(package)来导入其它的Python模块以实现这些函数。在该文件中,没有定义任何类或函数,它唯一的作用就是标识"crazy_functions"模块是一个包。 - -## [15/48] 请对下面的程序文件做一个概述: crazy_functions\下载arxiv论文翻译摘要.py - -这是一个 Python 程序文件,文件名为 `下载arxiv论文翻译摘要.py`。程序包含多个函数,其中 `下载arxiv论文并翻译摘要` 函数的作用是下载 `arxiv` 论文的 PDF 文件,提取摘要并使用 GPT 对其进行翻译。其他函数包括用于下载 `arxiv` 论文的 `download_arxiv_` 函数和用于获取文章信息的 `get_name` 函数,其中涉及使用第三方库如 requests, BeautifulSoup 等。该文件还包含一些用于调试和存储文件的代码段。 - -## [16/48] 请对下面的程序文件做一个概述: crazy_functions\代码重写为全英文_多线程.py - -该程序文件是一个多线程程序,主要功能是将指定目录下的所有Python代码文件中的中文内容转化为英文,并将转化后的代码存储到一个新的文件中。其中,程序使用了GPT-3等技术进行中文-英文的转化,同时也进行了一些Token限制下的处理,以防止程序发生错误。程序在执行过程中还会输出一些提示信息,并将所有转化过的代码文件存储到指定目录下。在程序执行结束后,还会生成一个任务执行报告,记录程序运行的详细信息。 - -## [17/48] 请对下面的程序文件做一个概述: crazy_functions\图片生成.py - -该程序文件提供了一个用于生成图像的函数`图片生成`。函数实现的过程中,会调用`gen_image`函数来生成图像,并返回图像生成的网址和本地文件地址。函数有多个参数,包括`prompt`(激励文本)、`llm_kwargs`(GPT模型的参数)、`plugin_kwargs`(插件模型的参数)等。函数核心代码使用了`requests`库向OpenAI API请求图像,并做了简单的处理和保存。函数还更新了交互界面,清空聊天历史并显示正在生成图像的消息和最终的图像网址和预览。 - -## [18/48] 请对下面的程序文件做一个概述: crazy_functions\对话历史存档.py - -这个文件是名为crazy_functions\对话历史存档.py的Python程序文件,包含了4个函数: - -1. write_chat_to_file(chatbot, history=None, file_name=None):用来将对话记录以Markdown格式写入文件中,并且生成文件名,如果没指定文件名则用当前时间。写入完成后将文件路径打印出来。 - -2. gen_file_preview(file_name):从传入的文件中读取内容,解析出对话历史记录并返回前100个字符,用于文件预览。 - -3. read_file_to_chat(chatbot, history, file_name):从传入的文件中读取内容,解析出对话历史记录并更新聊天显示框。 - -4. 
对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):一个主要函数,用于保存当前对话记录并提醒用户。如果用户希望加载历史记录,则调用read_file_to_chat()来更新聊天显示框。如果用户希望删除历史记录,调用删除所有本地对话历史记录()函数完成删除操作。 - -## [19/48] 请对下面的程序文件做一个概述: crazy_functions\总结word文档.py - -该程序文件实现了一个总结Word文档的功能,使用Python的docx库读取docx格式的文件,使用pywin32库读取doc格式的文件。程序会先根据传入的txt参数搜索需要处理的文件,并逐个解析其中的内容,将内容拆分为指定长度的文章片段,然后使用另一个程序文件中的request_gpt_model_in_new_thread_with_ui_alive函数进行中文概述。最后将所有的总结结果写入一个文件中,并在界面上进行展示。 - -## [20/48] 请对下面的程序文件做一个概述: crazy_functions\总结音视频.py - -该程序文件包括两个函数:split_audio_file()和AnalyAudio(),并且导入了一些必要的库并定义了一些工具函数。split_audio_file用于将音频文件分割成多个时长相等的片段,返回一个包含所有切割音频片段文件路径的列表,而AnalyAudio用来分析音频文件,通过调用whisper模型进行音频转文字并使用GPT模型对音频内容进行概述,最终将所有总结结果写入结果文件中。 - -## [21/48] 请对下面的程序文件做一个概述: crazy_functions\批量Markdown翻译.py - -该程序文件名为`批量Markdown翻译.py`,包含了以下功能:读取Markdown文件,将长文本分离开来,将Markdown文件进行翻译(英译中和中译英),整理结果并退出。程序使用了多线程以提高效率。程序使用了`tiktoken`依赖库,可能需要额外安装。文件中还有一些其他的函数和类,但与文件名所描述的功能无关。 - -## [22/48] 请对下面的程序文件做一个概述: crazy_functions\批量总结PDF文档.py - -该文件是一个Python脚本,名为crazy_functions\批量总结PDF文档.py。在导入了一系列库和工具函数后,主要定义了5个函数,其中包括一个错误处理装饰器(@CatchException),用于批量总结PDF文档。该函数主要实现对PDF文档的解析,并调用模型生成中英文摘要。 - -## [23/48] 请对下面的程序文件做一个概述: crazy_functions\批量总结PDF文档pdfminer.py - -该程序文件是一个用于批量总结PDF文档的函数插件,使用了pdfminer插件和BeautifulSoup库来提取PDF文档的文本内容,对每个PDF文件分别进行处理并生成中英文摘要。同时,该程序文件还包括一些辅助工具函数和处理异常的装饰器。 - -## [24/48] 请对下面的程序文件做一个概述: crazy_functions\批量翻译PDF文档_多线程.py - -这个程序文件是一个Python脚本,文件名为“批量翻译PDF文档_多线程.py”。它主要使用了“toolbox”、“request_gpt_model_in_new_thread_with_ui_alive”、“request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency”、“colorful”等Python库和自定义的模块“crazy_utils”的一些函数。程序实现了一个批量翻译PDF文档的功能,可以自动解析PDF文件中的基础信息,递归地切割PDF文件,翻译和处理PDF论文中的所有内容,并生成相应的翻译结果文件(包括md文件和html文件)。功能比较复杂,其中需要调用多个函数和依赖库,涉及到多线程操作和UI更新。文件中有详细的注释和变量命名,代码比较清晰易读。 - -## [25/48] 请对下面的程序文件做一个概述: crazy_functions\理解PDF文档内容.py - -该程序文件实现了一个名为“理解PDF文档内容”的函数,该函数可以为输入的PDF文件提取摘要以及正文各部分的主要内容,并在提取过程中根据上下文关系进行学术性问题解答。该函数依赖于多个辅助函数和第三方库,并在执行过程中针对可能出现的异常进行了处理。 - -## [26/48] 请对下面的程序文件做一个概述: crazy_functions\生成函数注释.py - -该程序文件是一个Python模块文件,文件名为“生成函数注释.py”,定义了两个函数:一个是生成函数注释的主函数“生成函数注释”,另一个是通过装饰器实现异常捕捉的函数“批量生成函数注释”。该程序文件依赖于“toolbox”和本地“crazy_utils”模块,并且在运行时使用了多线程技术和GPT模型来生成注释。函数生成的注释结果使用Markdown表格输出并写入历史记录文件。 - -## [27/48] 请对下面的程序文件做一个概述: crazy_functions\联网的ChatGPT.py - -这是一个名为`联网的ChatGPT.py`的Python程序文件,其中定义了一个函数`连接网络回答问题`。该函数通过爬取搜索引擎的结果和访问网页来综合回答给定的问题,并使用ChatGPT模型完成回答。此外,该文件还包括一些工具函数,例如从网页中抓取文本和使用代理访问网页。 - -## [28/48] 请对下面的程序文件做一个概述: crazy_functions\解析JupyterNotebook.py - -这个程序文件包含了两个函数: `parseNotebook()`和`解析ipynb文件()`,并且引入了一些工具函数和类。`parseNotebook()`函数将Jupyter Notebook文件解析为文本代码块,`解析ipynb文件()`函数则用于解析多个Jupyter Notebook文件,使用`parseNotebook()`解析每个文件和一些其他的处理。函数中使用了多线程处理输入和输出,并且将结果写入到文件中。 - -## [29/48] 请对下面的程序文件做一个概述: crazy_functions\解析项目源代码.py - -这是一个源代码分析的Python代码文件,其中定义了多个函数,包括解析一个Python项目、解析一个C项目、解析一个C项目的头文件和解析一个Java项目等。其中解析源代码新函数是实际处理源代码分析并生成报告的函数。该函数首先会逐个读取传入的源代码文件,生成对应的请求内容,通过多线程发送到chatgpt进行分析。然后将结果写入文件,并进行汇总分析。最后通过调用update_ui函数刷新界面,完整实现了源代码的分析。 - -## [30/48] 请对下面的程序文件做一个概述: crazy_functions\询问多个大语言模型.py - -该程序文件包含两个函数:同时问询()和同时问询_指定模型(),它们的作用是使用多个大语言模型同时对用户输入进行处理,返回对应模型的回复结果。同时问询()会默认使用ChatGPT和ChatGLM两个模型,而同时问询_指定模型()则可以指定要使用的模型。该程序文件还引用了其他的模块和函数库。 - -## [31/48] 请对下面的程序文件做一个概述: crazy_functions\读文章写摘要.py - -这个程序文件是一个Python模块,文件名为crazy_functions\读文章写摘要.py。该模块包含了两个函数,其中主要函数是"读文章写摘要"函数,其实现了解析给定文件夹中的tex文件,对其中每个文件的内容进行摘要生成,并根据各论文片段的摘要,最终生成全文摘要。第二个函数是"解析Paper"函数,用于解析单篇论文文件。其中用到了一些工具函数和库,如update_ui、CatchException、report_execption、write_results_to_file等。 - -## [32/48] 请对下面的程序文件做一个概述: crazy_functions\谷歌检索小助手.py - 
-该文件是一个Python模块,文件名为“谷歌检索小助手.py”。该模块包含两个函数,一个是“get_meta_information()”,用于从提供的网址中分析出所有相关的学术文献的元数据信息;另一个是“谷歌检索小助手()”,是主函数,用于分析用户提供的谷歌学术搜索页面中出现的文章,并提取相关信息。其中,“谷歌检索小助手()”函数依赖于“get_meta_information()”函数,并调用了其他一些Python模块,如“arxiv”、“math”、“bs4”等。 - -## [33/48] 请对下面的程序文件做一个概述: crazy_functions\高级功能函数模板.py - -该程序文件定义了一个名为高阶功能模板函数的函数,该函数接受多个参数,包括输入的文本、gpt模型参数、插件模型参数、聊天显示框的句柄、聊天历史等,并利用送出请求,使用 Unsplash API 发送相关图片。其中,为了避免输入溢出,函数会在开始时清空历史。函数也有一些 UI 更新的语句。该程序文件还依赖于其他两个模块:CatchException 和 update_ui,以及一个名为 request_gpt_model_in_new_thread_with_ui_alive 的来自 crazy_utils 模块(应该是自定义的工具包)的函数。 - -## [34/48] 请对下面的程序文件做一个概述: request_llm\bridge_all.py - -该文件包含两个函数:predict和predict_no_ui_long_connection,用于基于不同的LLM模型进行对话。该文件还包含一个lazyloadTiktoken类和一个LLM_CATCH_EXCEPTION修饰器函数。其中lazyloadTiktoken类用于懒加载模型的tokenizer,LLM_CATCH_EXCEPTION用于错误处理。整个文件还定义了一些全局变量和模型信息字典,用于引用和配置LLM模型。 - -## [35/48] 请对下面的程序文件做一个概述: request_llm\bridge_chatglm.py - -这是一个Python程序文件,名为`bridge_chatglm.py`,其中定义了一个名为`GetGLMHandle`的类和三个方法:`predict_no_ui_long_connection`、 `predict`和 `stream_chat`。该文件依赖于多个Python库,如`transformers`和`sentencepiece`。该文件实现了一个聊天机器人,使用ChatGLM模型来生成回复,支持单线程和多线程方式。程序启动时需要加载ChatGLM的模型和tokenizer,需要一段时间。在配置文件`config.py`中设置参数会影响模型的内存和显存使用,因此程序可能会导致低配计算机卡死。 - -## [36/48] 请对下面的程序文件做一个概述: request_llm\bridge_chatgpt.py - -该文件为 Python 代码文件,文件名为 request_llm\bridge_chatgpt.py。该代码文件主要提供三个函数:predict、predict_no_ui和 predict_no_ui_long_connection,用于发送至 chatGPT 并等待回复,获取输出。该代码文件还包含一些辅助函数,用于处理连接异常、生成 HTTP 请求等。该文件的代码架构清晰,使用了多个自定义函数和模块。 - -## [37/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_llama.py - -该代码文件实现了一个聊天机器人,其中使用了 JittorLLMs 模型。主要包括以下几个部分: -1. GetGLMHandle 类:一个进程类,用于加载 JittorLLMs 模型并接收并处理请求。 -2. predict_no_ui_long_connection 函数:一个多线程方法,用于在后台运行聊天机器人。 -3. predict 函数:一个单线程方法,用于在前端页面上交互式调用聊天机器人,以获取用户输入并返回相应的回复。 - -这个文件中还有一些辅助函数和全局变量,例如 importlib、time、threading 等。 - -## [38/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_pangualpha.py - -这个文件是为了实现使用jittorllms(一种机器学习模型)来进行聊天功能的代码。其中包括了模型加载、模型的参数加载、消息的收发等相关操作。其中使用了多进程和多线程来提高性能和效率。代码中还包括了处理依赖关系的函数和预处理函数等。 - -## [39/48] 请对下面的程序文件做一个概述: request_llm\bridge_jittorllms_rwkv.py - -这个文件是一个Python程序,文件名为request_llm\bridge_jittorllms_rwkv.py。它依赖transformers、time、threading、importlib、multiprocessing等库。在文件中,通过定义GetGLMHandle类加载jittorllms模型参数和定义stream_chat方法来实现与jittorllms模型的交互。同时,该文件还定义了predict_no_ui_long_connection和predict方法来处理历史信息、调用jittorllms模型、接收回复信息并输出结果。 - -## [40/48] 请对下面的程序文件做一个概述: request_llm\bridge_moss.py - -该文件为一个Python源代码文件,文件名为 request_llm\bridge_moss.py。代码定义了一个 GetGLMHandle 类和两个函数 predict_no_ui_long_connection 和 predict。 - -GetGLMHandle 类继承自Process类(多进程),主要功能是启动一个子进程并加载 MOSS 模型参数,通过 Pipe 进行主子进程的通信。该类还定义了 check_dependency、moss_init、run 和 stream_chat 等方法,其中 check_dependency 和 moss_init 是子进程的初始化方法,run 是子进程运行方法,stream_chat 实现了主进程和子进程的交互过程。 - -函数 predict_no_ui_long_connection 是多线程方法,调用 GetGLMHandle 类加载 MOSS 参数后使用 stream_chat 实现主进程和子进程的交互过程。 - -函数 predict 是单线程方法,通过调用 update_ui 将交互过程中 MOSS 的回复实时更新到UI(User Interface)中,并执行一个 named function(additional_fn)指定的函数对输入进行预处理。 - -## [41/48] 请对下面的程序文件做一个概述: request_llm\bridge_newbing.py - -这是一个名为`bridge_newbing.py`的程序文件,包含三个部分: - -第一部分使用from语句导入了`edge_gpt`模块的`NewbingChatbot`类。 - -第二部分定义了一个名为`NewBingHandle`的继承自进程类的子类,该类会检查依赖性并启动进程。同时,该部分还定义了一个名为`predict_no_ui_long_connection`的多线程方法和一个名为`predict`的单线程方法,用于与NewBing进行通信。 - -第三部分定义了一个名为`newbing_handle`的全局变量,并导出了`predict_no_ui_long_connection`和`predict`这两个方法,以供其他程序可以调用。 - -## [42/48] 请对下面的程序文件做一个概述: request_llm\bridge_newbingfree.py - 
-这个Python文件包含了三部分内容。第一部分是来自edge_gpt_free.py文件的聊天机器人程序。第二部分是子进程Worker,用于调用主体。第三部分提供了两个函数:predict_no_ui_long_connection和predict用于调用NewBing聊天机器人和返回响应。其中predict函数还提供了一些参数用于控制聊天机器人的回复和更新UI界面。 - -## [43/48] 请对下面的程序文件做一个概述: request_llm\bridge_stackclaude.py - -这是一个Python源代码文件,文件名为request_llm\bridge_stackclaude.py。代码分为三个主要部分: - -第一部分定义了Slack API Client类,实现Slack消息的发送、接收、循环监听,用于与Slack API进行交互。 - -第二部分定义了ClaudeHandle类,继承Process类,用于创建子进程Worker,调用主体,实现Claude与用户交互的功能。 - -第三部分定义了predict_no_ui_long_connection和predict两个函数,主要用于通过调用ClaudeHandle对象的stream_chat方法来获取Claude的回复,并更新ui以显示相关信息。其中predict函数采用单线程方法,而predict_no_ui_long_connection函数使用多线程方法。 - -## [44/48] 请对下面的程序文件做一个概述: request_llm\bridge_tgui.py - -该文件是一个Python代码文件,名为request_llm\bridge_tgui.py。它包含了一些函数用于与chatbot UI交互,并通过WebSocket协议与远程LLM模型通信完成文本生成任务,其中最重要的函数是predict()和predict_no_ui_long_connection()。这个程序还有其他的辅助函数,如random_hash()。整个代码文件在协作的基础上完成了一次修改。 - -## [45/48] 请对下面的程序文件做一个概述: request_llm\edge_gpt.py - -该文件是一个用于调用Bing chatbot API的Python程序,它由多个类和辅助函数构成,可以根据给定的对话连接在对话中提出问题,使用websocket与远程服务通信。程序实现了一个聊天机器人,可以为用户提供人工智能聊天。 - -## [46/48] 请对下面的程序文件做一个概述: request_llm\edge_gpt_free.py - -该代码文件为一个会话API,可通过Chathub发送消息以返回响应。其中使用了 aiohttp 和 httpx 库进行网络请求并发送。代码中包含了一些函数和常量,多数用于生成请求数据或是请求头信息等。同时该代码文件还包含了一个 Conversation 类,调用该类可实现对话交互。 - -## [47/48] 请对下面的程序文件做一个概述: request_llm\test_llms.py - -这个文件是用于对llm模型进行单元测试的Python程序。程序导入一个名为"request_llm.bridge_newbingfree"的模块,然后三次使用该模块中的predict_no_ui_long_connection()函数进行预测,并输出结果。此外,还有一些注释掉的代码段,这些代码段也是关于模型预测的。 - -## 用一张Markdown表格简要描述以下文件的功能: -check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, multi_language.py, theme.py, toolbox.py, crazy_functions\crazy_functions_test.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py。根据以上分析,用一句话概括程序的整体功能。 - -| 文件名 | 功能描述 | -| ------ | ------ | -| check_proxy.py | 检查代理有效性及地理位置 | -| colorful.py | 控制台打印彩色文字 | -| config.py | 配置和参数设置 | -| config_private.py | 私人配置和参数设置 | -| core_functional.py | 核心函数和参数设置 | -| crazy_functional.py | 高级功能插件集合 | -| main.py | 一个 Chatbot 程序,提供各种学术翻译、文本处理和其他查询服务 | -| multi_language.py | 识别和翻译不同语言 | -| theme.py | 自定义 gradio 应用程序主题 | -| toolbox.py | 工具类库,用于协助实现各种功能 | -| crazy_functions\crazy_functions_test.py | 测试 crazy_functions 中的各种函数 | -| crazy_functions\crazy_utils.py | 工具函数,用于字符串处理、异常检测、Markdown 格式转换等 | -| crazy_functions\Latex全文润色.py | 对整个 Latex 项目进行润色和纠错 | -| crazy_functions\Latex全文翻译.py | 对整个 Latex 项目进行翻译 | -| crazy_functions\__init__.py | 模块初始化文件,标识 `crazy_functions` 是一个包 | -| crazy_functions\下载arxiv论文翻译摘要.py | 下载 `arxiv` 论文的 PDF 文件,并提取摘要和翻译 | - -这些程序源文件提供了基础的文本和语言处理功能、工具函数和高级插件,使 Chatbot 能够处理各种复杂的学术文本问题,包括润色、翻译、搜索、下载、解析等。 - -## 用一张Markdown表格简要描述以下文件的功能: -crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生成.py, crazy_functions\对话历史存档.py, crazy_functions\总结word文档.py, crazy_functions\总结音视频.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\联网的ChatGPT.py, crazy_functions\解析JupyterNotebook.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py。根据以上分析,用一句话概括程序的整体功能。 - -| 文件名 | 功能简述 | -| --- | --- | -| 代码重写为全英文_多线程.py | 将Python源代码文件中的中文内容转化为英文 | -| 图片生成.py | 根据激励文本使用GPT模型生成相应的图像 | -| 对话历史存档.py | 将每次对话记录写入Markdown格式的文件中 | -| 总结word文档.py | 对输入的word文档进行摘要生成 | -| 总结音视频.py | 对输入的音视频文件进行摘要生成 | -| 批量Markdown翻译.py | 
将指定目录下的Markdown文件进行中英文翻译 | -| 批量总结PDF文档.py | 对PDF文件进行切割和摘要生成 | -| 批量总结PDF文档pdfminer.py | 对PDF文件进行文本内容的提取和摘要生成 | -| 批量翻译PDF文档_多线程.py | 将指定目录下的PDF文件进行中英文翻译 | -| 理解PDF文档内容.py | 对PDF文件进行摘要生成和问题解答 | -| 生成函数注释.py | 自动生成Python函数的注释 | -| 联网的ChatGPT.py | 使用网络爬虫和ChatGPT模型进行聊天回答 | -| 解析JupyterNotebook.py | 对Jupyter Notebook进行代码解析 | -| 解析项目源代码.py | 对指定编程语言的源代码进行解析 | -| 询问多个大语言模型.py | 使用多个大语言模型对输入进行处理和回复 | -| 读文章写摘要.py | 对论文进行解析和全文摘要生成 | - -概括程序的整体功能:提供了一系列处理文本、文件和代码的功能,使用了各类语言模型、多线程、网络请求和数据解析技术来提高效率和精度。 - -## 用一张Markdown表格简要描述以下文件的功能: -crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_jittorllms_llama.py, request_llm\bridge_jittorllms_pangualpha.py, request_llm\bridge_jittorllms_rwkv.py, request_llm\bridge_moss.py, request_llm\bridge_newbing.py, request_llm\bridge_newbingfree.py, request_llm\bridge_stackclaude.py, request_llm\bridge_tgui.py, request_llm\edge_gpt.py, request_llm\edge_gpt_free.py, request_llm\test_llms.py。根据以上分析,用一句话概括程序的整体功能。 - -| 文件名 | 功能描述 | -| --- | --- | -| crazy_functions\谷歌检索小助手.py | 提供谷歌学术搜索页面中相关文章的元数据信息。 | -| crazy_functions\高级功能函数模板.py | 使用Unsplash API发送相关图片以回复用户的输入。 | -| request_llm\bridge_all.py | 基于不同LLM模型进行对话。 | -| request_llm\bridge_chatglm.py | 使用ChatGLM模型生成回复,支持单线程和多线程方式。 | -| request_llm\bridge_chatgpt.py | 基于GPT模型完成对话。 | -| request_llm\bridge_jittorllms_llama.py | 使用JittorLLMs模型完成对话,支持单线程和多线程方式。 | -| request_llm\bridge_jittorllms_pangualpha.py | 使用JittorLLMs模型完成对话,基于多进程和多线程方式。 | -| request_llm\bridge_jittorllms_rwkv.py | 使用JittorLLMs模型完成聊天功能,提供包括历史信息、参数调节等在内的多个功能选项。 | -| request_llm\bridge_moss.py | 加载Moss模型完成对话功能。 | -| request_llm\bridge_newbing.py | 使用Newbing聊天机器人进行对话,支持单线程和多线程方式。 | -| request_llm\bridge_newbingfree.py | 基于Bing chatbot API实现聊天机器人的文本生成功能。 | -| request_llm\bridge_stackclaude.py | 基于Slack API实现Claude与用户的交互。 | -| request_llm\bridge_tgui.py | 通过websocket实现聊天机器人与UI界面交互。 | -| request_llm\edge_gpt.py | 调用Bing chatbot API提供聊天机器人服务。 | -| request_llm\edge_gpt_free.py | 实现聊天机器人API,采用aiohttp和httpx工具库。 | -| request_llm\test_llms.py | 对llm模型进行单元测试。 | -| 程序整体功能 | 实现不同种类的聊天机器人,可以根据输入进行文本生成。 | diff --git a/spaces/davidrd123/WikiArt_20genre/README.md b/spaces/davidrd123/WikiArt_20genre/README.md deleted file mode 100644 index 10796c125a62d52326797418925c7d3f13e246e6..0000000000000000000000000000000000000000 --- a/spaces/davidrd123/WikiArt_20genre/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: WikiArt_18genre -emoji: 💻 -colorFrom: gray -colorTo: gray -sdk: gradio -sdk_version: 2.9.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/davidtsong/whisper-demo/README.md b/spaces/davidtsong/whisper-demo/README.md deleted file mode 100644 index 376c39d878607293693aa1bb328612dc211b23d4..0000000000000000000000000000000000000000 --- a/spaces/davidtsong/whisper-demo/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: Whisper Demo -emoji: 📈 -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false ---- - diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/__init__.py deleted file mode 100644 index db7a0c42171b690af349971cc8efcaa30f8ccef5..0000000000000000000000000000000000000000 --- 
a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/__init__.py +++ /dev/null @@ -1,110 +0,0 @@ -import pkgutil - -import gradio.components as components -import gradio.inputs as inputs -import gradio.outputs as outputs -import gradio.processing_utils -import gradio.templates -import gradio.themes as themes -from gradio.blocks import Blocks -from gradio.chat_interface import ChatInterface -from gradio.components import ( - HTML, - JSON, - AnnotatedImage, - Annotatedimage, - Audio, - BarPlot, - Button, - Carousel, - Chatbot, - Checkbox, - CheckboxGroup, - Checkboxgroup, - ClearButton, - Code, - ColorPicker, - DataFrame, - Dataframe, - Dataset, - Dropdown, - DuplicateButton, - File, - Gallery, - Highlight, - HighlightedText, - Highlightedtext, - Image, - Interpretation, - Json, - Label, - LinePlot, - LoginButton, - LogoutButton, - Markdown, - Model3D, - Number, - Plot, - Radio, - ScatterPlot, - Slider, - State, - StatusTracker, - Text, - Textbox, - TimeSeries, - Timeseries, - UploadButton, - Variable, - Video, - component, -) -from gradio.deploy_space import deploy -from gradio.events import SelectData -from gradio.exceptions import Error -from gradio.external import load -from gradio.flagging import ( - CSVLogger, - FlaggingCallback, - HuggingFaceDatasetJSONSaver, - HuggingFaceDatasetSaver, - SimpleCSVLogger, -) -from gradio.helpers import ( - EventData, - Info, - Progress, - Warning, - make_waveform, - skip, - update, -) -from gradio.helpers import create_examples as Examples # noqa: N812 -from gradio.interface import Interface, TabbedInterface, close_all -from gradio.ipython_ext import load_ipython_extension -from gradio.layouts import Accordion, Box, Column, Group, Row, Tab, TabItem, Tabs -from gradio.mix import Parallel, Series -from gradio.oauth import OAuthProfile -from gradio.routes import Request, mount_gradio_app -from gradio.templates import ( - Files, - ImageMask, - ImagePaint, - List, - Matrix, - Mic, - Microphone, - Numpy, - Paint, - Pil, - PlayableVideo, - Sketchpad, - TextArea, - Webcam, -) -from gradio.themes import Base as Theme - -current_pkg_version = ( - (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip() -) -__version__ = current_pkg_version diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/annotated_image.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/annotated_image.py deleted file mode 100644 index acb137566dc054a7f6c7c6c9e2734f80960c7db5..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/annotated_image.py +++ /dev/null @@ -1,243 +0,0 @@ -"""gr.AnnotatedImage() component.""" - -from __future__ import annotations - -from typing import Literal - -import numpy as np -from gradio_client.documentation import document, set_documentation_group -from gradio_client.serializing import JSONSerializable -from PIL import Image as _Image # using _ to minimize namespace pollution - -from gradio import utils -from gradio.components.base import IOComponent, _Keywords -from gradio.deprecation import warn_style_method_deprecation -from gradio.events import ( - EventListenerMethod, - Selectable, -) - -set_documentation_group("component") - -_Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843 - - -@document() -class AnnotatedImage(Selectable, IOComponent, JSONSerializable): - """ - Displays a base image and colored 
subsections on top of that image. Subsections can take the from of rectangles (e.g. object detection) or masks (e.g. image segmentation). - Preprocessing: this component does *not* accept input. - Postprocessing: expects a {Tuple[numpy.ndarray | PIL.Image | str, List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]]} consisting of a base image and a list of subsections, that are either (x1, y1, x2, y2) tuples identifying object boundaries, or 0-1 confidence masks of the same shape as the image. A label is provided for each subsection. - - Demos: image_segmentation - """ - - def __init__( - self, - value: tuple[ - np.ndarray | _Image.Image | str, - list[tuple[np.ndarray | tuple[int, int, int, int], str]], - ] - | None = None, - *, - show_legend: bool = True, - height: int | None = None, - width: int | None = None, - color_map: dict[str, str] | None = None, - label: str | None = None, - every: float | None = None, - show_label: bool | None = None, - container: bool = True, - scale: int | None = None, - min_width: int = 160, - visible: bool = True, - elem_id: str | None = None, - elem_classes: list[str] | str | None = None, - **kwargs, - ): - """ - Parameters: - value: Tuple of base image and list of (subsection, label) pairs. - show_legend: If True, will show a legend of the subsections. - height: Height of the displayed image. - width: Width of the displayed image. - color_map: A dictionary mapping labels to colors. The colors must be specified as hex codes. - label: component name in interface. - every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute. - show_label: if True, will display label. - container: If True, will place the component in a container - providing some extra padding around the border. - scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer. - min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first. - visible: If False, component will be hidden. - elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. - elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles. - """ - self.show_legend = show_legend - self.height = height - self.width = width - self.color_map = color_map - self.select: EventListenerMethod - """ - Event listener for when the user selects Image subsection. - Uses event data gradio.SelectData to carry `value` referring to selected subsection label, and `index` to refer to subsection index. - See EventData documentation on how to use this event data. 
- """ - IOComponent.__init__( - self, - label=label, - every=every, - show_label=show_label, - container=container, - scale=scale, - min_width=min_width, - visible=visible, - elem_id=elem_id, - elem_classes=elem_classes, - value=value, - **kwargs, - ) - - def get_config(self): - return { - "show_legend": self.show_legend, - "value": self.value, - "height": self.height, - "width": self.width, - "color_map": self.color_map, - "selectable": self.selectable, - **IOComponent.get_config(self), - } - - @staticmethod - def update( - value: tuple[ - np.ndarray | _Image.Image | str, - list[tuple[np.ndarray | tuple[int, int, int, int], str]], - ] - | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE, - show_legend: bool | None = None, - height: int | None = None, - width: int | None = None, - color_map: dict[str, str] | None = None, - label: str | None = None, - show_label: bool | None = None, - container: bool | None = None, - scale: int | None = None, - min_width: int | None = None, - visible: bool | None = None, - ): - updated_config = { - "show_legend": show_legend, - "height": height, - "width": width, - "color_map": color_map, - "label": label, - "show_label": show_label, - "container": container, - "scale": scale, - "min_width": min_width, - "visible": visible, - "value": value, - "__type__": "update", - } - return updated_config - - def postprocess( - self, - y: tuple[ - np.ndarray | _Image.Image | str, - list[tuple[np.ndarray | tuple[int, int, int, int], str]], - ], - ) -> tuple[dict, list[tuple[dict, str]]] | None: - """ - Parameters: - y: Tuple of base image and list of subsections, with each subsection a two-part tuple where the first element is a 4 element bounding box or a 0-1 confidence mask, and the second element is the label. - Returns: - Tuple of base image file and list of subsections, with each subsection a two-part tuple where the first element image path of the mask, and the second element is the label. - """ - if y is None: - return None - base_img = y[0] - if isinstance(base_img, str): - base_img_path = base_img - base_img = np.array(_Image.open(base_img)) - elif isinstance(base_img, np.ndarray): - base_file = self.img_array_to_temp_file(base_img, dir=self.DEFAULT_TEMP_DIR) - base_img_path = str(utils.abspath(base_file)) - elif isinstance(base_img, _Image.Image): - base_file = self.pil_to_temp_file(base_img, dir=self.DEFAULT_TEMP_DIR) - base_img_path = str(utils.abspath(base_file)) - base_img = np.array(base_img) - else: - raise ValueError( - "AnnotatedImage only accepts filepaths, PIL images or numpy arrays for the base image." 
- ) - self.temp_files.add(base_img_path) - - sections = [] - color_map = self.color_map or {} - - def hex_to_rgb(value): - value = value.lstrip("#") - lv = len(value) - return [int(value[i : i + lv // 3], 16) for i in range(0, lv, lv // 3)] - - for mask, label in y[1]: - mask_array = np.zeros((base_img.shape[0], base_img.shape[1])) - if isinstance(mask, np.ndarray): - mask_array = mask - else: - x1, y1, x2, y2 = mask - border_width = 3 - mask_array[y1:y2, x1:x2] = 0.5 - mask_array[y1:y2, x1 : x1 + border_width] = 1 - mask_array[y1:y2, x2 - border_width : x2] = 1 - mask_array[y1 : y1 + border_width, x1:x2] = 1 - mask_array[y2 - border_width : y2, x1:x2] = 1 - - if label in color_map: - rgb_color = hex_to_rgb(color_map[label]) - else: - rgb_color = [255, 0, 0] - colored_mask = np.zeros((base_img.shape[0], base_img.shape[1], 4)) - solid_mask = np.copy(mask_array) - solid_mask[solid_mask > 0] = 1 - - colored_mask[:, :, 0] = rgb_color[0] * solid_mask - colored_mask[:, :, 1] = rgb_color[1] * solid_mask - colored_mask[:, :, 2] = rgb_color[2] * solid_mask - colored_mask[:, :, 3] = mask_array * 255 - - colored_mask_img = _Image.fromarray((colored_mask).astype(np.uint8)) - - mask_file = self.pil_to_temp_file( - colored_mask_img, dir=self.DEFAULT_TEMP_DIR - ) - mask_file_path = str(utils.abspath(mask_file)) - self.temp_files.add(mask_file_path) - - sections.append( - ({"name": mask_file_path, "data": None, "is_file": True}, label) - ) - - return {"name": base_img_path, "data": None, "is_file": True}, sections - - def style( - self, - *, - height: int | None = None, - width: int | None = None, - color_map: dict[str, str] | None = None, - **kwargs, - ): - """ - This method is deprecated. Please set these arguments in the constructor instead. - """ - warn_style_method_deprecation() - if height is not None: - self.height = height - if width is not None: - self.width = width - if color_map is not None: - self.color_map = color_map - return self diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b7554727.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b7554727.css deleted file mode 100644 index 6042064e14607ebbe037f2a497bab442b590be17..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b7554727.css +++ /dev/null @@ -1 +0,0 @@ -.wrap.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{display:flex;flex-wrap:wrap;gap:var(--checkbox-label-gap)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{display:flex;align-items:center;transition:var(--button-transition);cursor:pointer;box-shadow:var(--checkbox-label-shadow);border:var(--checkbox-label-border-width) solid 
var(--checkbox-label-border-color);border-radius:var(--button-small-radius);background:var(--checkbox-label-background-fill);padding:var(--checkbox-label-padding);color:var(--checkbox-label-text-color);font-weight:var(--checkbox-label-text-weight);font-size:var(--checkbox-label-text-size);line-height:var(--line-md)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:hover{background:var(--checkbox-label-background-fill-hover)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:focus{background:var(--checkbox-label-background-fill-focus)}label.selected.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{background:var(--checkbox-label-background-fill-selected);color:var(--checkbox-label-text-color-selected)}label.svelte-1p9xokt>.svelte-1p9xokt+.svelte-1p9xokt{margin-left:var(--size-2)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{--ring-color:transparent;position:relative;box-shadow:var(--checkbox-shadow);border:var(--checkbox-border-width) solid var(--checkbox-border-color);border-radius:var(--radius-full);background-color:var(--checkbox-background-color);line-height:var(--line-sm)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked,input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked:hover,input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked:focus{border-color:var(--checkbox-border-color-selected);background-image:var(--radio-circle);background-color:var(--checkbox-background-color-selected)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:hover{border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:focus{border-color:var(--checkbox-border-color-focus);background-color:var(--checkbox-background-color-focus)}input[disabled].svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt,.disabled.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{cursor:not-allowed} diff --git a/spaces/deafheavennnn/metalproxy/README.md b/spaces/deafheavennnn/metalproxy/README.md deleted file mode 100644 index e150cd67ca4cb6e9d77011fce185c3d7469fc66e..0000000000000000000000000000000000000000 --- a/spaces/deafheavennnn/metalproxy/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Metalproxy -emoji: 📚 -colorFrom: indigo -colorTo: pink -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/declare-lab/tango/diffusers/examples/controlnet/train_controlnet.py b/spaces/declare-lab/tango/diffusers/examples/controlnet/train_controlnet.py deleted file mode 100644 index 20c4fbe189a15493f26727580b250e431bbb3f99..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/examples/controlnet/train_controlnet.py +++ /dev/null @@ -1,1046 +0,0 @@ -#!/usr/bin/env python -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and - -import argparse -import logging -import math -import os -import random -from pathlib import Path - -import accelerate -import numpy as np -import torch -import torch.nn.functional as F -import torch.utils.checkpoint -import transformers -from accelerate import Accelerator -from accelerate.logging import get_logger -from accelerate.utils import ProjectConfiguration, set_seed -from datasets import load_dataset -from huggingface_hub import create_repo, upload_folder -from packaging import version -from PIL import Image -from torchvision import transforms -from tqdm.auto import tqdm -from transformers import AutoTokenizer, PretrainedConfig - -import diffusers -from diffusers import ( - AutoencoderKL, - ControlNetModel, - DDPMScheduler, - StableDiffusionControlNetPipeline, - UNet2DConditionModel, - UniPCMultistepScheduler, -) -from diffusers.optimization import get_scheduler -from diffusers.utils import check_min_version, is_wandb_available -from diffusers.utils.import_utils import is_xformers_available - - -if is_wandb_available(): - import wandb - -# Will error if the minimal version of diffusers is not installed. Remove at your own risks. -check_min_version("0.15.0.dev0") - -logger = get_logger(__name__) - - -def log_validation(vae, text_encoder, tokenizer, unet, controlnet, args, accelerator, weight_dtype, step): - logger.info("Running validation... ") - - controlnet = accelerator.unwrap_model(controlnet) - - pipeline = StableDiffusionControlNetPipeline.from_pretrained( - args.pretrained_model_name_or_path, - vae=vae, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - controlnet=controlnet, - safety_checker=None, - revision=args.revision, - torch_dtype=weight_dtype, - ) - pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config) - pipeline = pipeline.to(accelerator.device) - pipeline.set_progress_bar_config(disable=True) - - if args.enable_xformers_memory_efficient_attention: - pipeline.enable_xformers_memory_efficient_attention() - - if args.seed is None: - generator = None - else: - generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) - - if len(args.validation_image) == len(args.validation_prompt): - validation_images = args.validation_image - validation_prompts = args.validation_prompt - elif len(args.validation_image) == 1: - validation_images = args.validation_image * len(args.validation_prompt) - validation_prompts = args.validation_prompt - elif len(args.validation_prompt) == 1: - validation_images = args.validation_image - validation_prompts = args.validation_prompt * len(args.validation_image) - else: - raise ValueError( - "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`" - ) - - image_logs = [] - - for validation_prompt, validation_image in zip(validation_prompts, validation_images): - validation_image = Image.open(validation_image).convert("RGB") - - images = [] - - for _ in range(args.num_validation_images): - with torch.autocast("cuda"): - image = pipeline( - validation_prompt, validation_image, num_inference_steps=20, generator=generator - ).images[0] - - images.append(image) - - image_logs.append( - {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt} - ) - - for tracker in accelerator.trackers: - if tracker.name == "tensorboard": - for log in image_logs: - images = log["images"] - validation_prompt = log["validation_prompt"] - validation_image 
= log["validation_image"] - - formatted_images = [] - - formatted_images.append(np.asarray(validation_image)) - - for image in images: - formatted_images.append(np.asarray(image)) - - formatted_images = np.stack(formatted_images) - - tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC") - elif tracker.name == "wandb": - formatted_images = [] - - for log in image_logs: - images = log["images"] - validation_prompt = log["validation_prompt"] - validation_image = log["validation_image"] - - formatted_images.append(wandb.Image(validation_image, caption="Controlnet conditioning")) - - for image in images: - image = wandb.Image(image, caption=validation_prompt) - formatted_images.append(image) - - tracker.log({"validation": formatted_images}) - else: - logger.warn(f"image logging not implemented for {tracker.name}") - - -def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): - text_encoder_config = PretrainedConfig.from_pretrained( - pretrained_model_name_or_path, - subfolder="text_encoder", - revision=revision, - ) - model_class = text_encoder_config.architectures[0] - - if model_class == "CLIPTextModel": - from transformers import CLIPTextModel - - return CLIPTextModel - elif model_class == "RobertaSeriesModelWithTransformation": - from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation - - return RobertaSeriesModelWithTransformation - else: - raise ValueError(f"{model_class} is not supported.") - - -def parse_args(input_args=None): - parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.") - parser.add_argument( - "--pretrained_model_name_or_path", - type=str, - default=None, - required=True, - help="Path to pretrained model or model identifier from huggingface.co/models.", - ) - parser.add_argument( - "--controlnet_model_name_or_path", - type=str, - default=None, - help="Path to pretrained controlnet model or model identifier from huggingface.co/models." - " If not specified controlnet weights are initialized from unet.", - ) - parser.add_argument( - "--revision", - type=str, - default=None, - required=False, - help=( - "Revision of pretrained model identifier from huggingface.co/models. Trainable model components should be" - " float32 precision." - ), - ) - parser.add_argument( - "--tokenizer_name", - type=str, - default=None, - help="Pretrained tokenizer name or path if not the same as model_name", - ) - parser.add_argument( - "--output_dir", - type=str, - default="controlnet-model", - help="The output directory where the model predictions and checkpoints will be written.", - ) - parser.add_argument( - "--cache_dir", - type=str, - default=None, - help="The directory where the downloaded models and datasets will be stored.", - ) - parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") - parser.add_argument( - "--resolution", - type=int, - default=512, - help=( - "The resolution for input images, all the images in the train/validation dataset will be resized to this" - " resolution" - ), - ) - parser.add_argument( - "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." - ) - parser.add_argument("--num_train_epochs", type=int, default=1) - parser.add_argument( - "--max_train_steps", - type=int, - default=None, - help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", - ) - parser.add_argument( - "--checkpointing_steps", - type=int, - default=500, - help=( - "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. " - "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference." - "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components." - "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step" - "instructions." - ), - ) - parser.add_argument( - "--checkpoints_total_limit", - type=int, - default=None, - help=( - "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." - " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" - " for more details" - ), - ) - parser.add_argument( - "--resume_from_checkpoint", - type=str, - default=None, - help=( - "Whether training should be resumed from a previous checkpoint. Use a path saved by" - ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' - ), - ) - parser.add_argument( - "--gradient_accumulation_steps", - type=int, - default=1, - help="Number of updates steps to accumulate before performing a backward/update pass.", - ) - parser.add_argument( - "--gradient_checkpointing", - action="store_true", - help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", - ) - parser.add_argument( - "--learning_rate", - type=float, - default=5e-6, - help="Initial learning rate (after the potential warmup period) to use.", - ) - parser.add_argument( - "--scale_lr", - action="store_true", - default=False, - help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", - ) - parser.add_argument( - "--lr_scheduler", - type=str, - default="constant", - help=( - 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' - ' "constant", "constant_with_warmup"]' - ), - ) - parser.add_argument( - "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." - ) - parser.add_argument( - "--lr_num_cycles", - type=int, - default=1, - help="Number of hard resets of the lr in cosine_with_restarts scheduler.", - ) - parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") - parser.add_argument( - "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." - ) - parser.add_argument( - "--dataloader_num_workers", - type=int, - default=0, - help=( - "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." 
- ), - ) - parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") - parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") - parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") - parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") - parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") - parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") - parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") - parser.add_argument( - "--hub_model_id", - type=str, - default=None, - help="The name of the repository to keep in sync with the local `output_dir`.", - ) - parser.add_argument( - "--logging_dir", - type=str, - default="logs", - help=( - "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" - " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." - ), - ) - parser.add_argument( - "--allow_tf32", - action="store_true", - help=( - "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" - " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" - ), - ) - parser.add_argument( - "--report_to", - type=str, - default="tensorboard", - help=( - 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' - ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' - ), - ) - parser.add_argument( - "--mixed_precision", - type=str, - default=None, - choices=["no", "fp16", "bf16"], - help=( - "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" - " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" - " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." - ), - ) - parser.add_argument( - "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." - ) - parser.add_argument( - "--set_grads_to_none", - action="store_true", - help=( - "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain" - " behaviors, so disable this argument if it causes any problems. More info:" - " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html" - ), - ) - parser.add_argument( - "--dataset_name", - type=str, - default=None, - help=( - "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," - " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," - " or to a folder containing files that 🤗 Datasets can understand." - ), - ) - parser.add_argument( - "--dataset_config_name", - type=str, - default=None, - help="The config of the Dataset, leave as None if there's only one config.", - ) - parser.add_argument( - "--train_data_dir", - type=str, - default=None, - help=( - "A folder containing the training data. Folder contents must follow the structure described in" - " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" - " must exist to provide the captions for the images. 
Ignored if `dataset_name` is specified." - ), - ) - parser.add_argument( - "--image_column", type=str, default="image", help="The column of the dataset containing the target image." - ) - parser.add_argument( - "--conditioning_image_column", - type=str, - default="conditioning_image", - help="The column of the dataset containing the controlnet conditioning image.", - ) - parser.add_argument( - "--caption_column", - type=str, - default="text", - help="The column of the dataset containing a caption or a list of captions.", - ) - parser.add_argument( - "--max_train_samples", - type=int, - default=None, - help=( - "For debugging purposes or quicker training, truncate the number of training examples to this " - "value if set." - ), - ) - parser.add_argument( - "--proportion_empty_prompts", - type=float, - default=0, - help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).", - ) - parser.add_argument( - "--validation_prompt", - type=str, - default=None, - nargs="+", - help=( - "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`." - " Provide either a matching number of `--validation_image`s, a single `--validation_image`" - " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s." - ), - ) - parser.add_argument( - "--validation_image", - type=str, - default=None, - nargs="+", - help=( - "A set of paths to the controlnet conditioning image be evaluated every `--validation_steps`" - " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a" - " a single `--validation_prompt` to be used with all `--validation_image`s, or a single" - " `--validation_image` that will be used with all `--validation_prompt`s." - ), - ) - parser.add_argument( - "--num_validation_images", - type=int, - default=4, - help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair", - ) - parser.add_argument( - "--validation_steps", - type=int, - default=100, - help=( - "Run validation every X steps. Validation consists of running the prompt" - " `args.validation_prompt` multiple times: `args.num_validation_images`" - " and logging the images." 
- ), - ) - parser.add_argument( - "--tracker_project_name", - type=str, - default="train_controlnet", - required=True, - help=( - "The `project_name` argument passed to Accelerator.init_trackers for" - " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" - ), - ) - - if input_args is not None: - args = parser.parse_args(input_args) - else: - args = parser.parse_args() - - if args.dataset_name is None and args.train_data_dir is None: - raise ValueError("Specify either `--dataset_name` or `--train_data_dir`") - - if args.dataset_name is not None and args.train_data_dir is not None: - raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`") - - if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1: - raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].") - - if args.validation_prompt is not None and args.validation_image is None: - raise ValueError("`--validation_image` must be set if `--validation_prompt` is set") - - if args.validation_prompt is None and args.validation_image is not None: - raise ValueError("`--validation_prompt` must be set if `--validation_image` is set") - - if ( - args.validation_image is not None - and args.validation_prompt is not None - and len(args.validation_image) != 1 - and len(args.validation_prompt) != 1 - and len(args.validation_image) != len(args.validation_prompt) - ): - raise ValueError( - "Must provide either 1 `--validation_image`, 1 `--validation_prompt`," - " or the same number of `--validation_prompt`s and `--validation_image`s" - ) - - return args - - -def make_train_dataset(args, tokenizer, accelerator): - # Get the datasets: you can either provide your own training and evaluation files (see below) - # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). - - # In distributed training, the load_dataset function guarantees that only one local process can concurrently - # download the dataset. - if args.dataset_name is not None: - # Downloading and loading a dataset from the hub. - dataset = load_dataset( - args.dataset_name, - args.dataset_config_name, - cache_dir=args.cache_dir, - ) - else: - if args.train_data_dir is not None: - dataset = load_dataset( - args.train_data_dir, - cache_dir=args.cache_dir, - ) - # See more about loading custom images at - # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script - - # Preprocessing the datasets. - # We need to tokenize inputs and targets. - column_names = dataset["train"].column_names - - # 6. Get the column names for input/target. - if args.image_column is None: - image_column = column_names[0] - logger.info(f"image column defaulting to {image_column}") - else: - image_column = args.image_column - if image_column not in column_names: - raise ValueError( - f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" - ) - - if args.caption_column is None: - caption_column = column_names[1] - logger.info(f"caption column defaulting to {caption_column}") - else: - caption_column = args.caption_column - if caption_column not in column_names: - raise ValueError( - f"`--caption_column` value '{args.caption_column}' not found in dataset columns. 
Dataset columns are: {', '.join(column_names)}" - ) - - if args.conditioning_image_column is None: - conditioning_image_column = column_names[2] - logger.info(f"conditioning image column defaulting to {caption_column}") - else: - conditioning_image_column = args.conditioning_image_column - if conditioning_image_column not in column_names: - raise ValueError( - f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}" - ) - - def tokenize_captions(examples, is_train=True): - captions = [] - for caption in examples[caption_column]: - if random.random() < args.proportion_empty_prompts: - captions.append("") - elif isinstance(caption, str): - captions.append(caption) - elif isinstance(caption, (list, np.ndarray)): - # take a random caption if there are multiple - captions.append(random.choice(caption) if is_train else caption[0]) - else: - raise ValueError( - f"Caption column `{caption_column}` should contain either strings or lists of strings." - ) - inputs = tokenizer( - captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" - ) - return inputs.input_ids - - image_transforms = transforms.Compose( - [ - transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.ToTensor(), - transforms.Normalize([0.5], [0.5]), - ] - ) - - conditioning_image_transforms = transforms.Compose( - [ - transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), - transforms.ToTensor(), - ] - ) - - def preprocess_train(examples): - images = [image.convert("RGB") for image in examples[image_column]] - images = [image_transforms(image) for image in images] - - conditioning_images = [image.convert("RGB") for image in examples[conditioning_image_column]] - conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images] - - examples["pixel_values"] = images - examples["conditioning_pixel_values"] = conditioning_images - examples["input_ids"] = tokenize_captions(examples) - - return examples - - with accelerator.main_process_first(): - if args.max_train_samples is not None: - dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) - # Set the training transforms - train_dataset = dataset["train"].with_transform(preprocess_train) - - return train_dataset - - -def collate_fn(examples): - pixel_values = torch.stack([example["pixel_values"] for example in examples]) - pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() - - conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples]) - conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float() - - input_ids = torch.stack([example["input_ids"] for example in examples]) - - return { - "pixel_values": pixel_values, - "conditioning_pixel_values": conditioning_pixel_values, - "input_ids": input_ids, - } - - -def main(args): - logging_dir = Path(args.output_dir, args.logging_dir) - - accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit) - - accelerator = Accelerator( - gradient_accumulation_steps=args.gradient_accumulation_steps, - mixed_precision=args.mixed_precision, - log_with=args.report_to, - logging_dir=logging_dir, - project_config=accelerator_project_config, - ) - - # Make one log on every process with the configuration for debugging. 
- logging.basicConfig( - format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", - datefmt="%m/%d/%Y %H:%M:%S", - level=logging.INFO, - ) - logger.info(accelerator.state, main_process_only=False) - if accelerator.is_local_main_process: - transformers.utils.logging.set_verbosity_warning() - diffusers.utils.logging.set_verbosity_info() - else: - transformers.utils.logging.set_verbosity_error() - diffusers.utils.logging.set_verbosity_error() - - # If passed along, set the training seed now. - if args.seed is not None: - set_seed(args.seed) - - # Handle the repository creation - if accelerator.is_main_process: - if args.output_dir is not None: - os.makedirs(args.output_dir, exist_ok=True) - - if args.push_to_hub: - repo_id = create_repo( - repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token - ).repo_id - - # Load the tokenizer - if args.tokenizer_name: - tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) - elif args.pretrained_model_name_or_path: - tokenizer = AutoTokenizer.from_pretrained( - args.pretrained_model_name_or_path, - subfolder="tokenizer", - revision=args.revision, - use_fast=False, - ) - - # import correct text encoder class - text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) - - # Load scheduler and models - noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") - text_encoder = text_encoder_cls.from_pretrained( - args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision - ) - vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) - unet = UNet2DConditionModel.from_pretrained( - args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision - ) - - if args.controlnet_model_name_or_path: - logger.info("Loading existing controlnet weights") - controlnet = ControlNetModel.from_pretrained(args.controlnet_model_name_or_path) - else: - logger.info("Initializing controlnet weights from unet") - controlnet = ControlNetModel.from_unet(unet) - - # `accelerate` 0.16.0 will have better support for customized saving - if version.parse(accelerate.__version__) >= version.parse("0.16.0"): - # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format - def save_model_hook(models, weights, output_dir): - i = len(weights) - 1 - - while len(weights) > 0: - weights.pop() - model = models[i] - - sub_dir = "controlnet" - model.save_pretrained(os.path.join(output_dir, sub_dir)) - - i -= 1 - - def load_model_hook(models, input_dir): - while len(models) > 0: - # pop models so that they are not loaded again - model = models.pop() - - # load diffusers style into model - load_model = ControlNetModel.from_pretrained(input_dir, subfolder="controlnet") - model.register_to_config(**load_model.config) - - model.load_state_dict(load_model.state_dict()) - del load_model - - accelerator.register_save_state_pre_hook(save_model_hook) - accelerator.register_load_state_pre_hook(load_model_hook) - - vae.requires_grad_(False) - unet.requires_grad_(False) - text_encoder.requires_grad_(False) - controlnet.train() - - if args.enable_xformers_memory_efficient_attention: - if is_xformers_available(): - import xformers - - xformers_version = version.parse(xformers.__version__) - if xformers_version == version.parse("0.0.16"): - logger.warn( - "xFormers 0.0.16 cannot be used 
for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." - ) - unet.enable_xformers_memory_efficient_attention() - controlnet.enable_xformers_memory_efficient_attention() - else: - raise ValueError("xformers is not available. Make sure it is installed correctly") - - if args.gradient_checkpointing: - controlnet.enable_gradient_checkpointing() - - # Check that all trainable models are in full precision - low_precision_error_string = ( - " Please make sure to always have all model weights in full float32 precision when starting training - even if" - " doing mixed precision training, copy of the weights should still be float32." - ) - - if accelerator.unwrap_model(controlnet).dtype != torch.float32: - raise ValueError( - f"Controlnet loaded as datatype {accelerator.unwrap_model(controlnet).dtype}. {low_precision_error_string}" - ) - - # Enable TF32 for faster training on Ampere GPUs, - # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices - if args.allow_tf32: - torch.backends.cuda.matmul.allow_tf32 = True - - if args.scale_lr: - args.learning_rate = ( - args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes - ) - - # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs - if args.use_8bit_adam: - try: - import bitsandbytes as bnb - except ImportError: - raise ImportError( - "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." - ) - - optimizer_class = bnb.optim.AdamW8bit - else: - optimizer_class = torch.optim.AdamW - - # Optimizer creation - params_to_optimize = controlnet.parameters() - optimizer = optimizer_class( - params_to_optimize, - lr=args.learning_rate, - betas=(args.adam_beta1, args.adam_beta2), - weight_decay=args.adam_weight_decay, - eps=args.adam_epsilon, - ) - - train_dataset = make_train_dataset(args, tokenizer, accelerator) - - train_dataloader = torch.utils.data.DataLoader( - train_dataset, - shuffle=True, - collate_fn=collate_fn, - batch_size=args.train_batch_size, - num_workers=args.dataloader_num_workers, - ) - - # Scheduler and math around the number of training steps. - overrode_max_train_steps = False - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if args.max_train_steps is None: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - overrode_max_train_steps = True - - lr_scheduler = get_scheduler( - args.lr_scheduler, - optimizer=optimizer, - num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, - num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, - num_cycles=args.lr_num_cycles, - power=args.lr_power, - ) - - # Prepare everything with our `accelerator`. - controlnet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( - controlnet, optimizer, train_dataloader, lr_scheduler - ) - - # For mixed precision training we cast the text_encoder and vae weights to half-precision - # as these models are only used for inference, keeping weights in full precision is not required. 
- weight_dtype = torch.float32 - if accelerator.mixed_precision == "fp16": - weight_dtype = torch.float16 - elif accelerator.mixed_precision == "bf16": - weight_dtype = torch.bfloat16 - - # Move vae, unet and text_encoder to device and cast to weight_dtype - vae.to(accelerator.device, dtype=weight_dtype) - unet.to(accelerator.device, dtype=weight_dtype) - text_encoder.to(accelerator.device, dtype=weight_dtype) - - # We need to recalculate our total training steps as the size of the training dataloader may have changed. - num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) - if overrode_max_train_steps: - args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch - # Afterwards we recalculate our number of training epochs - args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) - - # We need to initialize the trackers we use, and also store our configuration. - # The trackers initializes automatically on the main process. - if accelerator.is_main_process: - tracker_config = dict(vars(args)) - - # tensorboard cannot handle list types for config - tracker_config.pop("validation_prompt") - tracker_config.pop("validation_image") - - accelerator.init_trackers(args.tracker_project_name, config=tracker_config) - - # Train! - total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps - - logger.info("***** Running training *****") - logger.info(f" Num examples = {len(train_dataset)}") - logger.info(f" Num batches each epoch = {len(train_dataloader)}") - logger.info(f" Num Epochs = {args.num_train_epochs}") - logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") - logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") - logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") - logger.info(f" Total optimization steps = {args.max_train_steps}") - global_step = 0 - first_epoch = 0 - - # Potentially load in the weights and states from a previous save - if args.resume_from_checkpoint: - if args.resume_from_checkpoint != "latest": - path = os.path.basename(args.resume_from_checkpoint) - else: - # Get the most recent checkpoint - dirs = os.listdir(args.output_dir) - dirs = [d for d in dirs if d.startswith("checkpoint")] - dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) - path = dirs[-1] if len(dirs) > 0 else None - - if path is None: - accelerator.print( - f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." - ) - args.resume_from_checkpoint = None - initial_global_step = 0 - else: - accelerator.print(f"Resuming from checkpoint {path}") - accelerator.load_state(os.path.join(args.output_dir, path)) - global_step = int(path.split("-")[1]) - - initial_global_step = global_step * args.gradient_accumulation_steps - first_epoch = global_step // num_update_steps_per_epoch - else: - initial_global_step = 0 - - progress_bar = tqdm( - range(0, args.max_train_steps), - initial=initial_global_step, - desc="Steps", - # Only show the progress bar once on each machine. 
- disable=not accelerator.is_local_main_process, - ) - - for epoch in range(first_epoch, args.num_train_epochs): - for step, batch in enumerate(train_dataloader): - with accelerator.accumulate(controlnet): - # Convert images to latent space - latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() - latents = latents * vae.config.scaling_factor - - # Sample noise that we'll add to the latents - noise = torch.randn_like(latents) - bsz = latents.shape[0] - # Sample a random timestep for each image - timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) - timesteps = timesteps.long() - - # Add noise to the latents according to the noise magnitude at each timestep - # (this is the forward diffusion process) - noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) - - # Get the text embedding for conditioning - encoder_hidden_states = text_encoder(batch["input_ids"])[0] - - controlnet_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype) - - down_block_res_samples, mid_block_res_sample = controlnet( - noisy_latents, - timesteps, - encoder_hidden_states=encoder_hidden_states, - controlnet_cond=controlnet_image, - return_dict=False, - ) - - # Predict the noise residual - model_pred = unet( - noisy_latents, - timesteps, - encoder_hidden_states=encoder_hidden_states, - down_block_additional_residuals=down_block_res_samples, - mid_block_additional_residual=mid_block_res_sample, - ).sample - - # Get the target for loss depending on the prediction type - if noise_scheduler.config.prediction_type == "epsilon": - target = noise - elif noise_scheduler.config.prediction_type == "v_prediction": - target = noise_scheduler.get_velocity(latents, noise, timesteps) - else: - raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") - loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") - - accelerator.backward(loss) - if accelerator.sync_gradients: - params_to_clip = controlnet.parameters() - accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) - optimizer.step() - lr_scheduler.step() - optimizer.zero_grad(set_to_none=args.set_grads_to_none) - - # Checks if the accelerator has performed an optimization step behind the scenes - if accelerator.sync_gradients: - progress_bar.update(1) - global_step += 1 - - if accelerator.is_main_process: - if global_step % args.checkpointing_steps == 0: - save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") - accelerator.save_state(save_path) - logger.info(f"Saved state to {save_path}") - - if args.validation_prompt is not None and global_step % args.validation_steps == 0: - log_validation( - vae, - text_encoder, - tokenizer, - unet, - controlnet, - args, - accelerator, - weight_dtype, - global_step, - ) - - logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} - progress_bar.set_postfix(**logs) - accelerator.log(logs, step=global_step) - - if global_step >= args.max_train_steps: - break - - # Create the pipeline using using the trained modules and save it. 
- accelerator.wait_for_everyone() - if accelerator.is_main_process: - controlnet = accelerator.unwrap_model(controlnet) - controlnet.save_pretrained(args.output_dir) - - if args.push_to_hub: - upload_folder( - repo_id=repo_id, - folder_path=args.output_dir, - commit_message="End of training", - ignore_patterns=["step_*", "epoch_*"], - ) - - accelerator.end_training() - - -if __name__ == "__main__": - args = parse_args() - main(args) diff --git a/spaces/decodemai/business_tech_ideas/app.py b/spaces/decodemai/business_tech_ideas/app.py deleted file mode 100644 index a42823a1c6c3c71c8f06f7264187e4b9ef9375d1..0000000000000000000000000000000000000000 --- a/spaces/decodemai/business_tech_ideas/app.py +++ /dev/null @@ -1,99 +0,0 @@ -import json -import requests -import gradio as gr -import random -import time -import os -import datetime -from datetime import datetime - - -API_TOKEN = os.getenv("API_TOKEN") -DECODEM_TOKEN=os.getenv("DECODEM_TOKEN") - - -from huggingface_hub import InferenceApi -inference = InferenceApi("bigscience/bloom",token=API_TOKEN) - -headers = {'Content-type': 'application/json', 'Accept': 'text/plain'} -url_decodemprompts='https://us-central1-createinsightsproject.cloudfunctions.net/getdecodemprompts' - -data={"prompt_type":'business_tech_ideas',"decodem_token":DECODEM_TOKEN} -try: - r = requests.post(url_decodemprompts, data=json.dumps(data), headers=headers) -except requests.exceptions.ReadTimeout as e: - print(e) -#print(r.content) - -prompt=str(r.content, 'UTF-8') - -def infer(prompt, - max_length = 250, - top_k = 0, - num_beams = 0, - no_repeat_ngram_size = 2, - top_p = 0.9, - seed=42, - temperature=0.7, - greedy_decoding = False, - return_full_text = False): - - print(seed) - top_k = None if top_k == 0 else top_k - do_sample = False if num_beams > 0 else not greedy_decoding - num_beams = None if (greedy_decoding or num_beams == 0) else num_beams - no_repeat_ngram_size = None if num_beams is None else no_repeat_ngram_size - top_p = None if num_beams else top_p - early_stopping = None if num_beams is None else num_beams > 0 - - params = { - "max_new_tokens": max_length, - "top_k": top_k, - "top_p": top_p, - "temperature": temperature, - "do_sample": do_sample, - "seed": seed, - "early_stopping":early_stopping, - "no_repeat_ngram_size":no_repeat_ngram_size, - "num_beams":num_beams, - "return_full_text":return_full_text - } - - s = time.time() - response = inference(prompt, params=params) - #print(response) - proc_time = time.time()-s - #print(f"Processing time was {proc_time} seconds") - return response - -def getideas(text_inp): - print(text_inp) - print(datetime.today().strftime("%d-%m-%Y")) - - text = prompt+"\nInput:"+text_inp + "\nOutput:" - resp = infer(text,seed=random.randint(0,100)) - - generated_text=resp[0]['generated_text'] - result = generated_text.replace(text,'').strip() - result = result.replace("Output:","") - parts = result.split("###") - topic = parts[0].strip() - topic="\n".join(topic.split('\n')[:3]) - print(topic) - return(topic) - -with gr.Blocks() as demo: - gr.Markdown("

Tech Ideas for Your Business
      ") - gr.Markdown( - """ChatGPT based Insights from Decodem.ai for businesses.\nWhile ChatGPT has multiple use cases we have evolved specific use cases/ templates for businesses \n\n This template provides ideas on how a business would look like in the future. Enter a business area and get the results. We use a equally powerful AI model bigscience/bloom.""" - ) - textbox = gr.Textbox(placeholder="Enter business type here...", lines=1,label='Your business area') - btn = gr.Button("Generate") - output1 = gr.Textbox(lines=2,label='The future') - - btn.click(getideas,inputs=[textbox], outputs=[output1]) - examples = gr.Examples(examples=['icecream parlor','space travel','book shop','ecommerce','grocery delivery'], - inputs=[textbox]) - - -demo.launch() \ No newline at end of file diff --git a/spaces/derek-thomas/RAGDemo/app.py b/spaces/derek-thomas/RAGDemo/app.py deleted file mode 100644 index 132e48e0905717fd051c51afd568332c4c36a17d..0000000000000000000000000000000000000000 --- a/spaces/derek-thomas/RAGDemo/app.py +++ /dev/null @@ -1,148 +0,0 @@ -import subprocess - -subprocess.run(["pip", "install", "--upgrade", "transformers[torch,sentencepiece]==4.34.1"]) - -from functools import partial -import logging -from pathlib import Path -from time import perf_counter - -import gradio as gr -from jinja2 import Environment, FileSystemLoader - -from backend.query_llm import generate -from backend.semantic_search import qd_retriever - -proj_dir = Path(__file__).parent -# Setting up the logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -# Set up the template environment with the templates directory -env = Environment(loader=FileSystemLoader(proj_dir / 'templates')) - -# Load the templates directly from the environment -template = env.get_template('template.j2') -template_html = env.get_template('template_html.j2') - -# Examples -examples = ['What is the capital of China?', - 'Why is the sky blue?', - 'Who won the mens world cup in 2014?', ] - - -def add_text(history, text): - history = [] if history is None else history - history = history + [(text, None)] - return history, gr.Textbox(value="", interactive=False) - - -def bot(history, hyde=False): - top_k = 4 - query = history[-1][0] - - logger.warning('Retrieving documents...') - # Retrieve documents relevant to query - document_start = perf_counter() - if hyde: - hyde_document = "" - generator = generate(f"Write a wikipedia article intro paragraph to answer this query: {query}", history) - for output_chunk in generator: - hyde_document = output_chunk - - logger.warning(hyde_document) - documents = qd_retriever.retrieve(hyde_document, top_k=top_k) - else: - documents = qd_retriever.retrieve(query, top_k=top_k) - document_time = perf_counter() - document_start - logger.warning(f'Finished Retrieving documents in {round(document_time, 2)} seconds...') - - # Create Prompt - prompt = template.render(documents=documents, query=query) - prompt_html = template_html.render(documents=documents, query=query) - - history[-1][1] = "" - for character in generate(prompt, history[:-1]): - history[-1][1] = character - yield history, prompt_html - - -with gr.Blocks() as demo: - with gr.Tab("RAGDemo"): - chatbot = gr.Chatbot( - [], - elem_id="chatbot", - avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg', - 'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'), - bubble_full_width=False, - show_copy_button=True, - show_share_button=True, - ) - - with gr.Row(): - txt 
= gr.Textbox( - scale=3, - show_label=False, - placeholder="Enter text and press enter", - container=False, - ) - txt_btn = gr.Button(value="Submit text", scale=1) - - # Examples - gr.Examples(examples, txt) - - prompt_html = gr.HTML() - # Turn off interactivity while generating if you click - txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then( - bot, chatbot, [chatbot, prompt_html]) - - # Turn it back on - txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False) - - # Turn off interactivity while generating if you hit enter - txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then( - bot, chatbot, [chatbot, prompt_html]) - - # Turn it back on - txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False) - - with gr.Tab("RAGDemo + HyDE"): - hyde_chatbot = gr.Chatbot( - [], - elem_id="chatbot", - avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg', - 'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'), - bubble_full_width=False, - show_copy_button=True, - show_share_button=True, - ) - - with gr.Row(): - hyde_txt = gr.Textbox( - scale=3, - show_label=False, - placeholder="Enter text and press enter", - container=False, - ) - hyde_txt_btn = gr.Button(value="Submit text", scale=1) - - # Examples - gr.Examples(examples, hyde_txt) - - hyde_prompt_html = gr.HTML() - # Turn off interactivity while generating if you click - hyde_txt_msg = hyde_txt_btn.click(add_text, [hyde_chatbot, hyde_txt], [hyde_chatbot, hyde_txt], queue=False).then( - partial(bot, hyde=True), [hyde_chatbot], [hyde_chatbot, hyde_prompt_html]) - - # Turn it back on - hyde_txt_msg.then(lambda: gr.Textbox(interactive=True), None, [hyde_txt], queue=False) - - # Turn off interactivity while generating if you hit enter - hyde_txt_msg = hyde_txt.submit(add_text, [hyde_chatbot, hyde_txt], [hyde_chatbot, hyde_txt], queue=False).then( - partial(bot, hyde=True), [hyde_chatbot], [hyde_chatbot, hyde_prompt_html]) - - # Turn it back on - hyde_txt_msg.then(lambda: gr.Textbox(interactive=True), None, [hyde_txt], queue=False) - -demo.queue() -demo.launch(debug=True) diff --git a/spaces/devduttabain/facebook-musicgen-small/README.md b/spaces/devduttabain/facebook-musicgen-small/README.md deleted file mode 100644 index c6caf022aeaa33a433565b807a46088c790981ce..0000000000000000000000000000000000000000 --- a/spaces/devduttabain/facebook-musicgen-small/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Facebook Musicgen Small -emoji: 📉 -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.35.2 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/diacanFperku/AutoGPT/Ezx-Number-1-Hits-Keygen-BEST-35.md b/spaces/diacanFperku/AutoGPT/Ezx-Number-1-Hits-Keygen-BEST-35.md deleted file mode 100644 index 5236784bdea012cbae4493abbba054d8b16c5128..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Ezx-Number-1-Hits-Keygen-BEST-35.md +++ /dev/null @@ -1,33 +0,0 @@ -ezx number 1 hits keygen 35 - - - -CLICK HERE ::: [https://maudaracte.blogspot.com/?file=2tvJdC](https://maudaracte.blogspot.com/?file=2tvJdC) - - - - - - - - - -Here is a possible title and article with html formatting for the keyword "ezx number 1 hits keygen 35": - -How to Use EZX Number 1 Hits Keygen 35 to Create Amazing Drum Tracks -EZX Number 1 Hits is a sound expansion pack for 
Toontrack EZ Drummer and Superior Drummer that features a fusion of organic, electronic, high-tech and vintage drum sounds. It covers classic sounds from machines like Linn, TR-909, TR-808, DR55 and more, as well as custom designed acoustic sounds produced and mixed by Grammy Award-Winner Niklas Flyckt (Britney Spears -'Toxic'). -If you want to create contemporary pop, dance, house or hip hop drum tracks with EZX Number 1 Hits, you will need a keygen to unlock the full potential of this amazing product. A keygen is a software tool that generates a unique serial number or activation code for a software product. With a keygen, you can bypass the copy protection and use the software without any limitations. -However, finding a reliable and working keygen for EZX Number 1 Hits is not easy. There are many fake or malicious keygens on the internet that can harm your computer or steal your personal information. That's why we have created this guide to help you find and use EZX Number 1 Hits Keygen 35, which is one of the best and safest keygens available for this product. -Step 1: Download EZX Number 1 Hits Keygen 35 -The first step is to download EZX Number 1 Hits Keygen 35 from a trusted source. You can find it on bitbucket.org[^1^], iscribble.org[^2^] or soundcloud.com[^3^]. These are some of the websites that offer this keygen for free and without any viruses or malware. Just click on the link and follow the instructions to download the keygen file. -Step 2: Run EZX Number 1 Hits Keygen 35 -The next step is to run EZX Number 1 Hits Keygen 35 on your computer. You will need to extract the keygen file from the zip archive first. Then, double-click on the keygen.exe file to launch it. You will see a simple interface with a button that says "Generate". Click on it and wait for a few seconds until the keygen generates a serial number for EZX Number 1 Hits. -Step 3: Activate EZX Number 1 Hits with the Serial Number -The final step is to activate EZX Number 1 Hits with the serial number that you got from the keygen. You will need to have EZ Drummer or Superior Drummer installed on your computer first. Then, open the software and go to the settings menu. There, you will find an option to enter your serial number for EZX Number 1 Hits. Paste the serial number that you copied from the keygen and click on "OK". The software will verify your serial number and activate EZX Number 1 Hits for you. -Enjoy Creating Amazing Drum Tracks with EZX Number 1 Hits -Congratulations! You have successfully activated EZX Number 1 Hits with EZX Number 1 Hits Keygen 35. Now you can enjoy creating amazing drum tracks with this sound expansion pack. You can choose from different kits, sounds and effects to suit your musical style and genre. You can also use the MIDI files that come with EZX Number 1 Hits to get some inspiration or modify them to your liking. -EZX Number 1 Hits is a one-stop-shop for instant drum production. It has all the timeless sounds that have been used in countless hits over the years, as well as some fresh and modern sounds that will make your tracks stand out. With EZX Number 1 Hits Keygen 35, you can unlock all these sounds and use them without any restrictions. -So what are you waiting for? Download EZX Number 1 Hits Keygen 35 today and start making some number one hits! 
dfd1c89656 - - - diff --git a/spaces/diacanFperku/AutoGPT/Fuoco Nella Stiva 2 Movie In Italian Free Download ((NEW)) Mp4.md b/spaces/diacanFperku/AutoGPT/Fuoco Nella Stiva 2 Movie In Italian Free Download ((NEW)) Mp4.md deleted file mode 100644 index 919aeadca12b093183392d20509818b125fd2fc1..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Fuoco Nella Stiva 2 Movie In Italian Free Download ((NEW)) Mp4.md +++ /dev/null @@ -1,6 +0,0 @@ -

-Fuoco nella stiva 2 movie in italian free download mp4
-Download ……… https://gohhs.com/2uFTWO
      - -Ya haider mp4 download ... Linking words worksheet 2 ... SN Betting Reviews Free betting Bet In Play Mobile Betting Head 2 Head P2P Betting Betting ... League Spain La Liga Italy Serie A Germany Bundesliga Europe Transfers USA MLS Like ... Tips may refer to: Tips Industries, an Indian film production company Tips ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/diacanFperku/AutoGPT/Mankatha Movie !!INSTALL!! Download Tamilrockers 26.md b/spaces/diacanFperku/AutoGPT/Mankatha Movie !!INSTALL!! Download Tamilrockers 26.md deleted file mode 100644 index fdcb3c7da323a652ccfb52a5ca3574cc85912ddc..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Mankatha Movie !!INSTALL!! Download Tamilrockers 26.md +++ /dev/null @@ -1,8 +0,0 @@ -

-mankatha movie download tamilrockers 26
-Download File >>> https://gohhs.com/2uFVkc
- -Mankatha (meaning "gambling") is a 2011 Indian Tamil-language action thriller written and directed by Venkat Prabhu. It stars Ajith Kumar in what was promoted as his 50th film. The film has an adventurous plot in which the main characters take on corrupt military officials who want to seize and steal money from the bank, fighting them to restore justice. -The film is in the spirit of Prabhu's previous films (he won the 2010 Best Action Movie award): fast-paced, tense, and it keeps you on your toes until the very end. -The plot is simple, but it gives the actors room to showcase their skills and charisma. 8a78ff9644
      -
      -
      -

      diff --git a/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/commons.py b/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/commons.py deleted file mode 100644 index 9ad0444b61cbadaa388619986c2889c707d873ce..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/commons.py +++ /dev/null @@ -1,161 +0,0 @@ -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, 
n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/text/cleaner.py b/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/text/cleaner.py deleted file mode 100644 index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/text/cleaner.py +++ /dev/null @@ -1,27 +0,0 @@ -from text import chinese, cleaned_text_to_sequence - - -language_module_map = { - 'ZH': chinese -} - - -def clean_text(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - return norm_text, phones, tones, word2ph - -def clean_text_bert(text, language): - language_module = language_module_map[language] - norm_text = language_module.text_normalize(text) - phones, tones, word2ph = language_module.g2p(norm_text) - bert = language_module.get_bert_feature(norm_text, word2ph) - return phones, tones, bert - -def text_to_sequence(text, language): - norm_text, phones, tones, word2ph = clean_text(text, language) - return cleaned_text_to_sequence(phones, tones, language) - -if __name__ == '__main__': - pass diff --git a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/chinese.py b/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/chinese.py deleted file mode 100644 index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/chinese.py +++ /dev/null @@ -1,193 +0,0 @@ -import os -import re - -import cn2an -from pypinyin import lazy_pinyin, Style - -from text import symbols -from text.symbols import punctuation -from text.tone_sandhi import ToneSandhi - -current_file_path = os.path.dirname(__file__) -pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in - open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()} - -import jieba.posseg as psg - - -rep_map = { - 
':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - '$': '.', - '“': "'", - '”': "'", - '‘': "'", - '’': "'", - '(': "'", - ')': "'", - '(': "'", - ')': "'", - '《': "'", - '》': "'", - '【': "'", - '】': "'", - '[': "'", - ']': "'", - '—': "-", - '~': "-", - '~': "-", - '「': "'", - '」': "'", - -} - -tone_modifier = ToneSandhi() - -def replace_punctuation(text): - text = text.replace("嗯", "恩").replace("呣","母") - pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys())) - - replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) - - replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text) - - return replaced_text - -def g2p(text): - pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation)) - sentences = [i for i in re.split(pattern, text) if i.strip()!=''] - phones, tones, word2ph = _g2p(sentences) - assert sum(word2ph) == len(phones) - assert len(word2ph) == len(text) #Sometimes it will crash,you can add a try-catch. - phones = ['_'] + phones + ["_"] - tones = [0] + tones + [0] - word2ph = [1] + word2ph + [1] - return phones, tones, word2ph - - -def _get_initials_finals(word): - initials = [] - finals = [] - orig_initials = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.INITIALS) - orig_finals = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for c, v in zip(orig_initials, orig_finals): - initials.append(c) - finals.append(v) - return initials, finals - - -def _g2p(segments): - phones_list = [] - tones_list = [] - word2ph = [] - for seg in segments: - pinyins = [] - # Replace all English words in the sentence - seg = re.sub('[a-zA-Z]+', '', seg) - seg_cut = psg.lcut(seg) - initials = [] - finals = [] - seg_cut = tone_modifier.pre_merge_for_modify(seg_cut) - for word, pos in seg_cut: - if pos == 'eng': - continue - sub_initials, sub_finals = _get_initials_finals(word) - sub_finals = tone_modifier.modified_tone(word, pos, - sub_finals) - initials.append(sub_initials) - finals.append(sub_finals) - - # assert len(sub_initials) == len(sub_finals) == len(word) - initials = sum(initials, []) - finals = sum(finals, []) - # - for c, v in zip(initials, finals): - raw_pinyin = c+v - # NOTE: post process for pypinyin outputs - # we discriminate i, ii and iii - if c == v: - assert c in punctuation - phone = [c] - tone = '0' - word2ph.append(1) - else: - v_without_tone = v[:-1] - tone = v[-1] - - pinyin = c+v_without_tone - assert tone in '12345' - - if c: - # 多音节 - v_rep_map = { - "uei": 'ui', - 'iou': 'iu', - 'uen': 'un', - } - if v_without_tone in v_rep_map.keys(): - pinyin = c+v_rep_map[v_without_tone] - else: - # 单音节 - pinyin_rep_map = { - 'ing': 'ying', - 'i': 'yi', - 'in': 'yin', - 'u': 'wu', - } - if pinyin in pinyin_rep_map.keys(): - pinyin = pinyin_rep_map[pinyin] - else: - single_rep_map = { - 'v': 'yu', - 'e': 'e', - 'i': 'y', - 'u': 'w', - } - if pinyin[0] in single_rep_map.keys(): - pinyin = single_rep_map[pinyin[0]]+pinyin[1:] - - assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin) - phone = pinyin_to_symbol_map[pinyin].split(' ') - word2ph.append(len(phone)) - - phones_list += phone - tones_list += [int(tone)] * len(phone) - return phones_list, tones_list, word2ph - - - -def text_normalize(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - text = replace_punctuation(text) - return text - -def get_bert_feature(text, word2ph): - 
from text import chinese_bert - return chinese_bert.get_bert_feature(text, word2ph) - -if __name__ == '__main__': - from text.chinese_bert import get_bert_feature - text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏" - text = text_normalize(text) - print(text) - phones, tones, word2ph = g2p(text) - bert = get_bert_feature(text, word2ph) - - print(phones, tones, word2ph, bert.shape) - - -# # 示例用法 -# text = "这是一个示例文本:,你好!这是一个测试...." -# print(g2p_paddle(text)) # 输出: 这是一个示例文本你好这是一个测试 diff --git a/spaces/digitalxingtong/Nanami-Bert-VITS2/transcribe_genshin.py b/spaces/digitalxingtong/Nanami-Bert-VITS2/transcribe_genshin.py deleted file mode 100644 index acc98814af6189d129ab85946525bec55419a33f..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Nanami-Bert-VITS2/transcribe_genshin.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding=gbk -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count - -import soundfile -from scipy.io import wavfile -from tqdm import tqdm - -global speaker_annos -speaker_annos = [] - -def process(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=args.sr) - soundfile.write( - os.path.join(args.out_dir, speaker, wav_name), - wav, - sr - ) - -def process_text(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - global speaker_annos - tr_name = wav_name.replace('.wav', '') - with open(args.out_dir+'/'+speaker+'/'+tr_name+'.lab', "r", encoding="utf-8") as file: - text = file.read() - text = text.replace("{NICKNAME}",'') - text = text.replace("{M#}{F#}",'') - text = text.replace("{M#}{F#}",'') - substring = "{M#}{F#}" - if substring in text: - if tr_name.endswith("a"): - text = text.replace("{M#}{F#}",'') - if tr_name.endswith("b"): - text = text.replace("{M#}{F#}",'') - text = text.replace("#",'') - text = "ZH|" + text + "\n" # - speaker_annos.append(args.out_dir+'/'+speaker+'/'+wav_name+ "|" + speaker + "|" + text) - - - -if __name__ == "__main__": - parent_dir = "./genshin_dataset/" - speaker_names = list(os.walk(parent_dir))[0][1] - parser = argparse.ArgumentParser() - parser.add_argument("--sr", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./genshin_dataset", help="path to source dir") - parser.add_argument("--out_dir", type=str, default="./genshin_dataset", help="path to target dir") - args = parser.parse_args() - # processs = 8 - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass - for i in os.listdir(spk_dir): - if i.endswith("wav"): - pro=(spk_dir, i, args) - process_text(pro) - if len(speaker_annos) == 0: - print("transcribe error!!!") - with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f: - for line in speaker_annos: - f.write(line) - print("transcript file finished.") diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/attentions.py 
b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/attentions.py deleted file mode 100644 index ecbdbc8be941a962046fc11fd6739b093112123e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/attentions.py +++ /dev/null @@ -1,343 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from torch.nn.utils import weight_norm, remove_weight_norm -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - if isflow: - cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) - self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) - self.cond_layer = weight_norm(cond_layer, name='weight') - self.gin_channels = 256 - self.cond_layer_idx = self.n_layers - if 'gin_channels' in kwargs: - self.gin_channels = kwargs['gin_channels'] - if self.gin_channels != 0: - self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) - # vits2 says 3rd block, so idx is 2 by default - self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2 - print(self.gin_channels, self.cond_layer_idx) - assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers' - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - def forward(self, x, x_mask, g=None): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - if i == self.cond_layer_idx and g is not None: - g = self.spk_emb_linear(g.transpose(1, 2)) - g = g.transpose(1, 2) - x = x + g - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, 
p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return 
x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/text/tone_sandhi.py b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/text/tone_sandhi.py deleted file mode 100644 index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/text/tone_sandhi.py +++ /dev/null @@ -1,351 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from typing import List -from typing import Tuple - -import jieba -from pypinyin import lazy_pinyin -from pypinyin import Style - - -class ToneSandhi(): - def __init__(self): - self.must_neural_tone_words = { - '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝', - '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊', - '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去', - '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号', - '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当', - '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻', - '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂', - '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆', - '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂', - '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿', - '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台', - '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算', - '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨', - '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快', - '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜', - '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔', - '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事', - '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾', - '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼', - '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实', - '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头', - '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼', - '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数', - '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气', - '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈', - '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方', - '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴', - '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦', - '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝', - '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹', - '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息', - '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤', - '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家', - '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故', - '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨', - '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅', - '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱', - '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱', - '扫把', '惦记' - } - self.must_not_neural_tone_words = { - "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎" - } - self.punc = ":,;。?!“”‘’':,;.?!" - - # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041 - # e.g. - # word: "家里" - # pos: "s" - # finals: ['ia1', 'i3'] - def _neural_sandhi(self, word: str, pos: str, - finals: List[str]) -> List[str]: - - # reduplication words for n. and v. e.g. 
奶奶, 试试, 旺旺 - for j, item in enumerate(word): - if j - 1 >= 0 and item == word[j - 1] and pos[0] in { - "n", "v", "a" - } and word not in self.must_not_neural_tone_words: - finals[j] = finals[j][:-1] + "5" - ge_idx = word.find("个") - if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶": - finals[-1] = finals[-1][:-1] + "5" - elif len(word) >= 1 and word[-1] in "的地得": - finals[-1] = finals[-1][:-1] + "5" - # e.g. 走了, 看着, 去过 - # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}: - # finals[-1] = finals[-1][:-1] + "5" - elif len(word) > 1 and word[-1] in "们子" and pos in { - "r", "n" - } and word not in self.must_not_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 桌上, 地下, 家里 - elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}: - finals[-1] = finals[-1][:-1] + "5" - # e.g. 上来, 下去 - elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开": - finals[-1] = finals[-1][:-1] + "5" - # 个做量词 - elif (ge_idx >= 1 and - (word[ge_idx - 1].isnumeric() or - word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个': - finals[ge_idx] = finals[ge_idx][:-1] + "5" - else: - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals[-1] = finals[-1][:-1] + "5" - - word_list = self._split_word(word) - finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]] - for i, word in enumerate(word_list): - # conventional neural in Chinese - if word in self.must_neural_tone_words or word[ - -2:] in self.must_neural_tone_words: - finals_list[i][-1] = finals_list[i][-1][:-1] + "5" - finals = sum(finals_list, []) - return finals - - def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]: - # e.g. 看不懂 - if len(word) == 3 and word[1] == "不": - finals[1] = finals[1][:-1] + "5" - else: - for i, char in enumerate(word): - # "不" before tone4 should be bu2, e.g. 不怕 - if char == "不" and i + 1 < len(word) and finals[i + - 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - return finals - - def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]: - # "一" in number sequences, e.g. 一零零, 二一零 - if word.find("一") != -1 and all( - [item.isnumeric() for item in word if item != "一"]): - return finals - # "一" between reduplication words shold be yi5, e.g. 看一看 - elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]: - finals[1] = finals[1][:-1] + "5" - # when "一" is ordinal word, it should be yi1 - elif word.startswith("第一"): - finals[1] = finals[1][:-1] + "1" - else: - for i, char in enumerate(word): - if char == "一" and i + 1 < len(word): - # "一" before tone4 should be yi2, e.g. 一段 - if finals[i + 1][-1] == "4": - finals[i] = finals[i][:-1] + "2" - # "一" before non-tone4 should be yi4, e.g. 
一天 - else: - # "一" 后面如果是标点,还读一声 - if word[i + 1] not in self.punc: - finals[i] = finals[i][:-1] + "4" - return finals - - def _split_word(self, word: str) -> List[str]: - word_list = jieba.cut_for_search(word) - word_list = sorted(word_list, key=lambda i: len(i), reverse=False) - first_subword = word_list[0] - first_begin_idx = word.find(first_subword) - if first_begin_idx == 0: - second_subword = word[len(first_subword):] - new_word_list = [first_subword, second_subword] - else: - second_subword = word[:-len(first_subword)] - new_word_list = [second_subword, first_subword] - return new_word_list - - def _three_sandhi(self, word: str, finals: List[str]) -> List[str]: - if len(word) == 2 and self._all_tone_three(finals): - finals[0] = finals[0][:-1] + "2" - elif len(word) == 3: - word_list = self._split_word(word) - if self._all_tone_three(finals): - # disyllabic + monosyllabic, e.g. 蒙古/包 - if len(word_list[0]) == 2: - finals[0] = finals[0][:-1] + "2" - finals[1] = finals[1][:-1] + "2" - # monosyllabic + disyllabic, e.g. 纸/老虎 - elif len(word_list[0]) == 1: - finals[1] = finals[1][:-1] + "2" - else: - finals_list = [ - finals[:len(word_list[0])], finals[len(word_list[0]):] - ] - if len(finals_list) == 2: - for i, sub in enumerate(finals_list): - # e.g. 所有/人 - if self._all_tone_three(sub) and len(sub) == 2: - finals_list[i][0] = finals_list[i][0][:-1] + "2" - # e.g. 好/喜欢 - elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \ - finals_list[0][-1][-1] == "3": - - finals_list[0][-1] = finals_list[0][-1][:-1] + "2" - finals = sum(finals_list, []) - # split idiom into two words who's length is 2 - elif len(word) == 4: - finals_list = [finals[:2], finals[2:]] - finals = [] - for sub in finals_list: - if self._all_tone_three(sub): - sub[0] = sub[0][:-1] + "2" - finals += sub - - return finals - - def _all_tone_three(self, finals: List[str]) -> bool: - return all(x[-1] == "3" for x in finals) - - # merge "不" and the word behind it - # if don't merge, "不" sometimes appears alone according to jieba, which may occur sandhi error - def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - last_word = "" - for word, pos in seg: - if last_word == "不": - word = last_word + word - if word != "不": - new_seg.append((word, pos)) - last_word = word[:] - if last_word == "不": - new_seg.append((last_word, 'd')) - last_word = "" - return new_seg - - # function 1: merge "一" and reduplication words in it's left and right, e.g. "听","一","听" ->"听一听" - # function 2: merge single "一" and the word behind it - # if don't merge, "一" sometimes appears alone according to jieba, which may occur sandhi error - # e.g. 
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')] - # output seg: [['听一听', 'v']] - def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - # function 1 - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][ - 0] == seg[i + 1][0] and seg[i - 1][1] == "v": - new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0] - else: - if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][ - 0] == word and pos == "v": - continue - else: - new_seg.append([word, pos]) - seg = new_seg - new_seg = [] - # function 2 - for i, (word, pos) in enumerate(seg): - if new_seg and new_seg[-1][0] == "一": - new_seg[-1][0] = new_seg[-1][0] + word - else: - new_seg.append([word, pos]) - return new_seg - - # the first and the second words are all_tone_three - def _merge_continuous_three_tones( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and self._all_tone_three( - sub_finals_list[i - 1]) and self._all_tone_three( - sub_finals_list[i]) and not merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - - return new_seg - - def _is_reduplication(self, word: str) -> bool: - return len(word) == 2 and word[0] == word[1] - - # the last char of first word and the first char of second word is tone_three - def _merge_continuous_three_tones_2( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - sub_finals_list = [ - lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for (word, pos) in seg - ] - assert len(sub_finals_list) == len(seg) - merge_last = [False] * len(seg) - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \ - merge_last[i - 1]: - # if the last word is reduplication, not merge, because reduplication need to be _neural_sandhi - if not self._is_reduplication(seg[i - 1][0]) and len( - seg[i - 1][0]) + len(seg[i][0]) <= 3: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - merge_last[i] = True - else: - new_seg.append([word, pos]) - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#": - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def _merge_reduplication( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - new_seg = [] - for i, (word, pos) in enumerate(seg): - if new_seg and word == new_seg[-1][0]: - new_seg[-1][0] = new_seg[-1][0] + seg[i][0] - else: - new_seg.append([word, pos]) - return new_seg - - def pre_merge_for_modify( - self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]: - seg = self._merge_bu(seg) - try: - seg = self._merge_yi(seg) - except: - print("_merge_yi failed") - seg = self._merge_reduplication(seg) - seg = 
self._merge_continuous_three_tones(seg) - seg = self._merge_continuous_three_tones_2(seg) - seg = self._merge_er(seg) - return seg - - def modified_tone(self, word: str, pos: str, - finals: List[str]) -> List[str]: - finals = self._bu_sandhi(word, finals) - finals = self._yi_sandhi(word, finals) - finals = self._neural_sandhi(word, pos, finals) - finals = self._three_sandhi(word, finals) - return finals diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transcribe_genshin.py b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transcribe_genshin.py deleted file mode 100644 index acc98814af6189d129ab85946525bec55419a33f..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transcribe_genshin.py +++ /dev/null @@ -1,78 +0,0 @@ -# coding=gbk -import os -import argparse -import librosa -import numpy as np -from multiprocessing import Pool, cpu_count - -import soundfile -from scipy.io import wavfile -from tqdm import tqdm - -global speaker_annos -speaker_annos = [] - -def process(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - if os.path.exists(wav_path) and '.wav' in wav_path: - os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True) - wav, sr = librosa.load(wav_path, sr=args.sr) - soundfile.write( - os.path.join(args.out_dir, speaker, wav_name), - wav, - sr - ) - -def process_text(item): - spkdir, wav_name, args = item - speaker = spkdir.replace("\\", "/").split("/")[-1] - wav_path = os.path.join(args.in_dir, speaker, wav_name) - global speaker_annos - tr_name = wav_name.replace('.wav', '') - with open(args.out_dir+'/'+speaker+'/'+tr_name+'.lab', "r", encoding="utf-8") as file: - text = file.read() - text = text.replace("{NICKNAME}",'') - text = text.replace("{M#}{F#}",'') - text = text.replace("{M#}{F#}",'') - substring = "{M#}{F#}" - if substring in text: - if tr_name.endswith("a"): - text = text.replace("{M#}{F#}",'') - if tr_name.endswith("b"): - text = text.replace("{M#}{F#}",'') - text = text.replace("#",'') - text = "ZH|" + text + "\n" # - speaker_annos.append(args.out_dir+'/'+speaker+'/'+wav_name+ "|" + speaker + "|" + text) - - - -if __name__ == "__main__": - parent_dir = "./genshin_dataset/" - speaker_names = list(os.walk(parent_dir))[0][1] - parser = argparse.ArgumentParser() - parser.add_argument("--sr", type=int, default=44100, help="sampling rate") - parser.add_argument("--in_dir", type=str, default="./genshin_dataset", help="path to source dir") - parser.add_argument("--out_dir", type=str, default="./genshin_dataset", help="path to target dir") - args = parser.parse_args() - # processs = 8 - processs = cpu_count()-2 if cpu_count() >4 else 1 - pool = Pool(processes=processs) - - for speaker in os.listdir(args.in_dir): - spk_dir = os.path.join(args.in_dir, speaker) - if os.path.isdir(spk_dir): - print(spk_dir) - for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])): - pass - for i in os.listdir(spk_dir): - if i.endswith("wav"): - pro=(spk_dir, i, args) - process_text(pro) - if len(speaker_annos) == 0: - print("transcribe error!!!") - with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f: - for line in speaker_annos: - f.write(line) - print("transcript file finished.") diff --git a/spaces/dineshreddy/WALT/mmdet/models/backbones/detectors_resnext.py b/spaces/dineshreddy/WALT/mmdet/models/backbones/detectors_resnext.py deleted file mode 
100644 index 57d032fe37ed82d5ba24e761bdc014cc0ee5ac64..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/models/backbones/detectors_resnext.py +++ /dev/null @@ -1,122 +0,0 @@ -import math - -from mmcv.cnn import build_conv_layer, build_norm_layer - -from ..builder import BACKBONES -from .detectors_resnet import Bottleneck as _Bottleneck -from .detectors_resnet import DetectoRS_ResNet - - -class Bottleneck(_Bottleneck): - expansion = 4 - - def __init__(self, - inplanes, - planes, - groups=1, - base_width=4, - base_channels=64, - **kwargs): - """Bottleneck block for ResNeXt. - - If style is "pytorch", the stride-two layer is the 3x3 conv layer, if - it is "caffe", the stride-two layer is the first 1x1 conv layer. - """ - super(Bottleneck, self).__init__(inplanes, planes, **kwargs) - - if groups == 1: - width = self.planes - else: - width = math.floor(self.planes * - (base_width / base_channels)) * groups - - self.norm1_name, norm1 = build_norm_layer( - self.norm_cfg, width, postfix=1) - self.norm2_name, norm2 = build_norm_layer( - self.norm_cfg, width, postfix=2) - self.norm3_name, norm3 = build_norm_layer( - self.norm_cfg, self.planes * self.expansion, postfix=3) - - self.conv1 = build_conv_layer( - self.conv_cfg, - self.inplanes, - width, - kernel_size=1, - stride=self.conv1_stride, - bias=False) - self.add_module(self.norm1_name, norm1) - fallback_on_stride = False - self.with_modulated_dcn = False - if self.with_dcn: - fallback_on_stride = self.dcn.pop('fallback_on_stride', False) - if self.with_sac: - self.conv2 = build_conv_layer( - self.sac, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - elif not self.with_dcn or fallback_on_stride: - self.conv2 = build_conv_layer( - self.conv_cfg, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - else: - assert self.conv_cfg is None, 'conv_cfg must be None for DCN' - self.conv2 = build_conv_layer( - self.dcn, - width, - width, - kernel_size=3, - stride=self.conv2_stride, - padding=self.dilation, - dilation=self.dilation, - groups=groups, - bias=False) - - self.add_module(self.norm2_name, norm2) - self.conv3 = build_conv_layer( - self.conv_cfg, - width, - self.planes * self.expansion, - kernel_size=1, - bias=False) - self.add_module(self.norm3_name, norm3) - - -@BACKBONES.register_module() -class DetectoRS_ResNeXt(DetectoRS_ResNet): - """ResNeXt backbone for DetectoRS. - - Args: - groups (int): The number of groups in ResNeXt. - base_width (int): The base width of ResNeXt. 
- """ - - arch_settings = { - 50: (Bottleneck, (3, 4, 6, 3)), - 101: (Bottleneck, (3, 4, 23, 3)), - 152: (Bottleneck, (3, 8, 36, 3)) - } - - def __init__(self, groups=1, base_width=4, **kwargs): - self.groups = groups - self.base_width = base_width - super(DetectoRS_ResNeXt, self).__init__(**kwargs) - - def make_res_layer(self, **kwargs): - return super().make_res_layer( - groups=self.groups, - base_width=self.base_width, - base_channels=self.base_channels, - **kwargs) diff --git a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_pipelines/master_pipeline.py b/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_pipelines/master_pipeline.py deleted file mode 100644 index 2071df4f665932dacd4a827e418603996fb562c8..0000000000000000000000000000000000000000 --- a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_pipelines/master_pipeline.py +++ /dev/null @@ -1,42 +0,0 @@ -img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='ResizeOCR', - height=48, - min_width=48, - max_width=160, - keep_aspect_ratio=True), - dict(type='ToTensorOCR'), - dict(type='NormalizeOCR', **img_norm_cfg), - dict( - type='Collect', - keys=['img'], - meta_keys=[ - 'filename', 'ori_shape', 'img_shape', 'text', 'valid_ratio', - 'resize_shape' - ]), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiRotateAugOCR', - rotate_degrees=[0, 90, 270], - transforms=[ - dict( - type='ResizeOCR', - height=48, - min_width=48, - max_width=160, - keep_aspect_ratio=True), - dict(type='ToTensorOCR'), - dict(type='NormalizeOCR', **img_norm_cfg), - dict( - type='Collect', - keys=['img'], - meta_keys=[ - 'filename', 'ori_shape', 'img_shape', 'valid_ratio', - 'img_norm_cfg', 'ori_filename', 'resize_shape' - ]), - ]) -] diff --git a/spaces/dmeck/RVC-Speakers/speakers/server/bootstrap/__init__.py b/spaces/dmeck/RVC-Speakers/speakers/server/bootstrap/__init__.py deleted file mode 100644 index ccaf0c7cc17700a7521df36fcc8bf6f38642baa8..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/speakers/server/bootstrap/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from speakers.server.bootstrap.base import Bootstrap -from speakers.server.bootstrap.runner_bootstrap import RunnerBootstrapBaseWeb -from speakers.server.bootstrap.bootstrap_register import bootstrap_register - -__all__ = [ - "Bootstrap", - "RunnerBootstrapBaseWeb" -] diff --git a/spaces/docparser/Text_Captcha_breaker/app.py b/spaces/docparser/Text_Captcha_breaker/app.py deleted file mode 100644 index ce0749e6fa457d9bb67171b0557463bbcb03913c..0000000000000000000000000000000000000000 --- a/spaces/docparser/Text_Captcha_breaker/app.py +++ /dev/null @@ -1,77 +0,0 @@ -import torch -import onnx -import onnxruntime as rt -from torchvision import transforms as T -from PIL import Image -from tokenizer_base import Tokenizer -import pathlib -import os -import gradio as gr -from huggingface_hub import Repository - -repo = Repository( - local_dir="secret_models", - repo_type="model", - clone_from="docparser/captcha", - token=True -) -repo.git_pull() - -cwd = pathlib.Path(__file__).parent.resolve() -model_file = os.path.join(cwd,"secret_models","captcha.onnx") -img_size = (32,128) -charset = r"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" -tokenizer_base = Tokenizer(charset) - -def get_transform(img_size): - transforms = [] - transforms.extend([ - T.Resize(img_size, T.InterpolationMode.BICUBIC), - T.ToTensor(), - 
T.Normalize(0.5, 0.5) - ]) - return T.Compose(transforms) - -def to_numpy(tensor): - return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy() - -def initialize_model(model_file): - transform = get_transform(img_size) - # Onnx model loading - onnx_model = onnx.load(model_file) - onnx.checker.check_model(onnx_model) - ort_session = rt.InferenceSession(model_file) - return transform,ort_session - -def get_text(img_org): - # img_org = Image.open(image_path) - # Preprocess. Model expects a batch of images with shape: (B, C, H, W) - x = transform(img_org.convert('RGB')).unsqueeze(0) - - # compute ONNX Runtime output prediction - ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)} - logits = ort_session.run(None, ort_inputs)[0] - probs = torch.tensor(logits).softmax(-1) - preds, probs = tokenizer_base.decode(probs) - preds = preds[0] - print(preds) - return preds - -transform,ort_session = initialize_model(model_file=model_file) - -gr.Interface( - get_text, - inputs=gr.Image(type="pil"), - outputs=gr.outputs.Textbox(), - title="Text Captcha Reader", - examples=["8000.png","11JW29.png","2a8486.jpg","2nbcx.png", - "000679.png","000HU.png","00Uga.png.jpg","00bAQwhAZU.jpg", - "00h57kYf.jpg","0EoHdtVb.png","0JS21.png","0p98z.png","10010.png"] -).launch() - -# if __name__ == "__main__": -# image_path = "8000.png" -# preds,probs = get_text(image_path) -# print(preds[0]) - - diff --git a/spaces/dorkai/text-generation-webui-main/docs/Spell-book.md b/spaces/dorkai/text-generation-webui-main/docs/Spell-book.md deleted file mode 100644 index 9b7c76c953f76f8a486bbe5156de4e9ebb3f0ec0..0000000000000000000000000000000000000000 --- a/spaces/dorkai/text-generation-webui-main/docs/Spell-book.md +++ /dev/null @@ -1,107 +0,0 @@ -You have now entered a hidden corner of the internet. - -A confusing yet intriguing realm of paradoxes and contradictions. - -A place where you will find out that what you thought you knew, you in fact didn't know, and what you didn't know was in front of you all along. - -![](https://i.pinimg.com/originals/6e/e2/7b/6ee27bad351d3aca470d80f1033ba9c6.jpg) - -*In other words, here I will document little-known facts about this web UI that I could not find another place for in the wiki.* - -#### You can train LoRAs in CPU mode - -Load the web UI with - -``` -python server.py --cpu -``` - -and start training the LoRA from the training tab as usual. - -#### 8-bit mode works with CPU offloading - -``` -python server.py --load-in-8bit --gpu-memory 4000MiB -``` - -#### `--pre_layer`, and not `--gpu-memory`, is the right way to do CPU offloading with 4-bit models - -``` -python server.py --wbits 4 --groupsize 128 --pre_layer 20 -``` - -#### Models can be loaded in 32-bit, 16-bit, 8-bit, and 4-bit modes - -``` -python server.py --cpu -python server.py -python server.py --load-in-8bit -python server.py --wbits 4 -``` - -#### The web UI works with any version of GPTQ-for-LLaMa - -Including the up to date triton and cuda branches. 
But you have to delete the `repositories/GPTQ-for-LLaMa` folder and reinstall the new one every time: - -``` -cd text-generation-webui/repositories -rm -r GPTQ-for-LLaMa -pip uninstall quant-cuda -git clone https://github.com/oobabooga/GPTQ-for-LLaMa -b cuda # or any other repository and branch -cd GPTQ-for-LLaMa -python setup_cuda.py install -``` - -#### Instruction-following templates are represented as chat characters - -https://github.com/oobabooga/text-generation-webui/tree/main/characters/instruction-following - -#### The right way to run Alpaca, Open Assistant, Vicuna, etc is Instruct mode, not normal chat mode - -Otherwise the prompt will not be formatted correctly. - -1. Start the web UI with - -``` -python server.py --chat -``` - -2. Click on the "instruct" option under "Chat modes" - -3. Select the correct template in the hidden dropdown menu that will become visible. - -#### Notebook mode is best mode - -Ascended individuals have realized that notebook mode is the superset of chat mode and can do chats with ultimate flexibility, including group chats, editing replies, starting a new bot reply in a given way, and impersonating. - -#### RWKV is a RNN - -Most models are transformers, but not RWKV, which is a RNN. It's a great model. - -#### `--gpu-memory` is not a hard limit on the GPU memory - -It is simply a parameter that is passed to the `accelerate` library while loading the model. More memory will be allocated during generation. That's why this parameter has to be set to less than your total GPU memory. - -#### Contrastive search perhaps the best preset - -But it uses a ton of VRAM. - -#### You can check the sha256sum of downloaded models with the download script - -``` -python download-model.py facebook/galactica-125m --check -``` - -#### The download script continues interrupted downloads by default - -It doesn't start over. - -#### You can download models with multiple threads - -``` -python download-model.py facebook/galactica-125m --threads 8 -``` - -#### LoRAs work in 4-bit mode - -You need to follow [these instructions](GPTQ-models-(4-bit-mode).md#using-loras-in-4-bit-mode) and then start the web UI with the `--monkey-patch` flag. 
diff --git a/spaces/dragao-elastico/RVC_V2/utils.py b/spaces/dragao-elastico/RVC_V2/utils.py deleted file mode 100644 index 62be8d03a8e8b839f8747310ef0ec0e82fb8ff0a..0000000000000000000000000000000000000000 --- a/spaces/dragao-elastico/RVC_V2/utils.py +++ /dev/null @@ -1,151 +0,0 @@ -import ffmpeg -import numpy as np - -# import praatio -# import praatio.praat_scripts -import os -import sys - -import random - -import csv - -platform_stft_mapping = { - "linux": "stftpitchshift", - "darwin": "stftpitchshift", - "win32": "stftpitchshift.exe", -} - -stft = platform_stft_mapping.get(sys.platform) -# praatEXE = join('.',os.path.abspath(os.getcwd()) + r"\Praat.exe") - - -def CSVutil(file, rw, type, *args): - if type == "formanting": - if rw == "r": - with open(file) as fileCSVread: - csv_reader = list(csv.reader(fileCSVread)) - return ( - (csv_reader[0][0], csv_reader[0][1], csv_reader[0][2]) - if csv_reader is not None - else (lambda: exec('raise ValueError("No data")'))() - ) - else: - if args: - doformnt = args[0] - else: - doformnt = False - qfr = args[1] if len(args) > 1 else 1.0 - tmb = args[2] if len(args) > 2 else 1.0 - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([doformnt, qfr, tmb]) - elif type == "stop": - stop = args[0] if args else False - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([stop]) - - -def load_audio(file, sr, DoFormant, Quefrency, Timbre): - converted = False - DoFormant, Quefrency, Timbre = CSVutil("csvdb/formanting.csv", "r", "formanting") - try: - # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26 - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. 
- file = ( - file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) # 防止小白拷路径头尾带了空格和"和回车 - file_formanted = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - - # print(f"dofor={bool(DoFormant)} timbr={Timbre} quef={Quefrency}\n") - - if ( - lambda DoFormant: True - if DoFormant.lower() == "true" - else (False if DoFormant.lower() == "false" else DoFormant) - )(DoFormant): - numerator = round(random.uniform(1, 4), 4) - # os.system(f"stftpitchshift -i {file} -q {Quefrency} -t {Timbre} -o {file_formanted}") - # print('stftpitchshift -i "%s" -p 1.0 --rms -w 128 -v 8 -q %s -t %s -o "%s"' % (file, Quefrency, Timbre, file_formanted)) - - if not file.endswith(".wav"): - if not os.path.isfile(f"{file_formanted}.wav"): - converted = True - # print(f"\nfile = {file}\n") - # print(f"\nfile_formanted = {file_formanted}\n") - converting = ( - ffmpeg.input(file_formanted, threads=0) - .output(f"{file_formanted}.wav") - .run( - cmd=["ffmpeg", "-nostdin"], - capture_stdout=True, - capture_stderr=True, - ) - ) - else: - pass - - file_formanted = ( - f"{file_formanted}.wav" - if not file_formanted.endswith(".wav") - else file_formanted - ) - - print(f" · Formanting {file_formanted}...\n") - - os.system( - '%s -i "%s" -q "%s" -t "%s" -o "%sFORMANTED_%s.wav"' - % ( - stft, - file_formanted, - Quefrency, - Timbre, - file_formanted, - str(numerator), - ) - ) - - print(f" · Formanted {file_formanted}!\n") - - # filepraat = (os.path.abspath(os.getcwd()) + '\\' + file).replace('/','\\') - # file_formantedpraat = ('"' + os.path.abspath(os.getcwd()) + '/' + 'formanted'.join(file_formanted) + '"').replace('/','\\') - # print("%sFORMANTED_%s.wav" % (file_formanted, str(numerator))) - - out, _ = ( - ffmpeg.input( - "%sFORMANTED_%s.wav" % (file_formanted, str(numerator)), threads=0 - ) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - - try: - os.remove("%sFORMANTED_%s.wav" % (file_formanted, str(numerator))) - except Exception: - pass - print("couldn't remove formanted type of file") - - else: - out, _ = ( - ffmpeg.input(file, threads=0) - .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr) - .run( - cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True - ) - ) - except Exception as e: - raise RuntimeError(f"Failed to load audio: {e}") - - if converted: - try: - os.remove(file_formanted) - except Exception: - pass - print("couldn't remove converted type of file") - converted = False - - return np.frombuffer(out, np.float32).flatten() diff --git a/spaces/ds520/bingo/src/lib/hooks/use-copy-to-clipboard.tsx b/spaces/ds520/bingo/src/lib/hooks/use-copy-to-clipboard.tsx deleted file mode 100644 index 62f7156dca246c46b213151af003a3a177977ccf..0000000000000000000000000000000000000000 --- a/spaces/ds520/bingo/src/lib/hooks/use-copy-to-clipboard.tsx +++ /dev/null @@ -1,33 +0,0 @@ -'use client' - -import * as React from 'react' - -export interface useCopyToClipboardProps { - timeout?: number -} - -export function useCopyToClipboard({ - timeout = 2000 -}: useCopyToClipboardProps) { - const [isCopied, setIsCopied] = React.useState(false) - - const copyToClipboard = (value: string) => { - if (typeof window === 'undefined' || !navigator.clipboard?.writeText) { - return - } - - if (!value) { - return - } - - navigator.clipboard.writeText(value).then(() => { - setIsCopied(true) - - setTimeout(() => { - setIsCopied(false) - }, timeout) - }) - } - - return { isCopied, copyToClipboard } -} 
diff --git a/spaces/elkraken/Video-Object-Detection/utils/aws/resume.py b/spaces/elkraken/Video-Object-Detection/utils/aws/resume.py deleted file mode 100644 index 338685b19c19ddb47aa2fde22a535a8efcf17802..0000000000000000000000000000000000000000 --- a/spaces/elkraken/Video-Object-Detection/utils/aws/resume.py +++ /dev/null @@ -1,37 +0,0 @@ -# Resume all interrupted trainings in yolor/ dir including DDP trainings -# Usage: $ python utils/aws/resume.py - -import os -import sys -from pathlib import Path - -import torch -import yaml - -sys.path.append('./') # to run '$ python *.py' files in subdirectories - -port = 0 # --master_port -path = Path('').resolve() -for last in path.rglob('*/**/last.pt'): - ckpt = torch.load(last) - if ckpt['optimizer'] is None: - continue - - # Load opt.yaml - with open(last.parent.parent / 'opt.yaml') as f: - opt = yaml.load(f, Loader=yaml.SafeLoader) - - # Get device count - d = opt['device'].split(',') # devices - nd = len(d) # number of devices - ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel - - if ddp: # multi-GPU - port += 1 - cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' - else: # single-GPU - cmd = f'python train.py --resume {last}' - - cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread - print(cmd) - os.system(cmd) diff --git a/spaces/eson/tokenizer-arena/vocab/moss/convert_vocab_to_txt.py b/spaces/eson/tokenizer-arena/vocab/moss/convert_vocab_to_txt.py deleted file mode 100644 index b82bc4807144c434184193efb5128be7d445248f..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/moss/convert_vocab_to_txt.py +++ /dev/null @@ -1,16 +0,0 @@ -import json -from transformers import AutoTokenizer - -tokenizer = AutoTokenizer.from_pretrained("tokenizer", trust_remote_code=True) - -vocab = tokenizer.get_vocab() - -sorted_vocab = sorted(vocab.items(), key=lambda kv:kv[1]) - -f_out = open("20B_tokenizer.txt", "w", encoding="utf-8") -for token, idx in sorted_vocab: - decoded_token = tokenizer.decode([idx]) - f_out.write(json.dumps( - {"id": idx, "token": token, "token_decode": decoded_token}, ensure_ascii=False) + "\n") - # f_out.write(json.dumps({"id": idx, "token": token, "token_decode": decoded_token}) + "\t" + token + "\t" + decoded_token + "\n") - diff --git a/spaces/evanpierce/3D_Photo_Inpainting2/boostmonodepth_utils.py b/spaces/evanpierce/3D_Photo_Inpainting2/boostmonodepth_utils.py deleted file mode 100644 index 5f752b0caf9b8c9a64d9113e10d8b1fb2fa782b0..0000000000000000000000000000000000000000 --- a/spaces/evanpierce/3D_Photo_Inpainting2/boostmonodepth_utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import os -import cv2 -import glob -import numpy as np -import imageio -from MiDaS.MiDaS_utils import write_depth - -BOOST_BASE = 'BoostingMonocularDepth' - -BOOST_INPUTS = 'inputs' -BOOST_OUTPUTS = 'outputs' - -def run_boostmonodepth(img_names, src_folder, depth_folder): - - if not isinstance(img_names, list): - img_names = [img_names] - - # remove irrelevant files first - clean_folder(os.path.join(BOOST_BASE, BOOST_INPUTS)) - clean_folder(os.path.join(BOOST_BASE, BOOST_OUTPUTS)) - - tgt_names = [] - for img_name in img_names: - base_name = os.path.basename(img_name) - tgt_name = os.path.join(BOOST_BASE, BOOST_INPUTS, base_name) - os.system(f'cp {img_name} {tgt_name}') - - # keep only the file name here. 
- # they save all depth as .png file - tgt_names.append(os.path.basename(tgt_name).replace('.jpg', '.png')) - - os.system(f'cd {BOOST_BASE} && python run.py --Final --data_dir {BOOST_INPUTS}/ --output_dir {BOOST_OUTPUTS} --depthNet 0') - - for i, (img_name, tgt_name) in enumerate(zip(img_names, tgt_names)): - img = imageio.imread(img_name) - H, W = img.shape[:2] - scale = 640. / max(H, W) - - # resize and save depth - target_height, target_width = int(round(H * scale)), int(round(W * scale)) - depth = imageio.imread(os.path.join(BOOST_BASE, BOOST_OUTPUTS, tgt_name)) - depth = np.array(depth).astype(np.float32) - depth = resize_depth(depth, target_width, target_height) - np.save(os.path.join(depth_folder, tgt_name.replace('.png', '.npy')), depth / 32768. - 1.) - write_depth(os.path.join(depth_folder, tgt_name.replace('.png', '')), depth) - -def clean_folder(folder, img_exts=['.png', '.jpg', '.npy']): - - for img_ext in img_exts: - paths_to_check = os.path.join(folder, f'*{img_ext}') - if len(glob.glob(paths_to_check)) == 0: - continue - print(paths_to_check) - os.system(f'rm {paths_to_check}') - -def resize_depth(depth, width, height): - """Resize numpy (or image read by imageio) depth map - - Args: - depth (numpy): depth - width (int): image width - height (int): image height - - Returns: - array: processed depth - """ - depth = cv2.blur(depth, (3, 3)) - return cv2.resize(depth, (width, height), interpolation=cv2.INTER_AREA) diff --git a/spaces/fatiXbelha/sd/Bus Simulator Indonesia Mod APK Download The Best Simulation Game for Bus Lovers.md b/spaces/fatiXbelha/sd/Bus Simulator Indonesia Mod APK Download The Best Simulation Game for Bus Lovers.md deleted file mode 100644 index 073839b77b1a66340172029ec2085ec1b75685e7..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Bus Simulator Indonesia Mod APK Download The Best Simulation Game for Bus Lovers.md +++ /dev/null @@ -1,133 +0,0 @@ - -

      How to Download APK Mod Simulator Bus Indonesia

      -

      If you are a fan of bus driving games, you might have heard of Simulator Bus Indonesia, a popular mobile game that lets you experience what it is like to be a bus driver in Indonesia. But did you know that you can also download an APK mod version of the game that gives you more features and options? In this article, we will tell you what Simulator Bus Indonesia is, what APK mod is, and how to download and install the APK mod version of the game on your Android device.

      -

      download apk mod simulator bus indonesia


Download: https://urllie.com/2uNFph



      -

      What is Simulator Bus Indonesia?

      -

      Simulator Bus Indonesia, or BUSSID, is a mobile game developed by Maleo. As the name suggests, this bus driving game lets you get behind the wheels of a bus and drive through various cities in Indonesia. It comes with 3D graphics and offers two modes, letting you choose your preferred gameplay option to ensure that you are comfortable as you play.

      -

      Features of the game

      -

      Some of the top features of Simulator Bus Indonesia are:

      -
        -
• You can design your own livery for your bus, or choose from the ones available in the game.
• You can control your bus with easy and intuitive controls, such as steering wheel, buttons, or tilt.
• You can explore authentic Indonesian cities and places, such as Jakarta, Surabaya, Bali, and more.
• You can drive different types of Indonesian buses, such as PO Harapan Jaya, Sinar Jaya, Gunung Harta, and more.
• You can honk your horn with cool and fun sounds, such as "Om Telolet Om!", a popular phrase among Indonesian bus enthusiasts.
• You can save your data online and use your own 3D model using the vehicle mod system.
• You can join online multiplayer convoys with other players and chat with them.
      -

      How to play the game

      -

      To play Simulator Bus Indonesia, you need to download and install the game from Google Play Store or other sources. Then, you need to choose a mode: career or free. In career mode, you need to complete missions and earn money by driving passengers from one place to another. In free mode, you can drive anywhere you want without any restrictions. You can also customize your bus and settings from the menu. To drive your bus, you need to use the controls on the screen, such as gas pedal, brake pedal, steering wheel, horn, lights, wipers, etc. You also need to follow the traffic rules and avoid accidents. You can check your map and GPS for directions and destinations.

      -

      What is APK Mod?

      -

      APK Mod is a modified version of an original APK file that has been altered by someone to add or remove some features from the original app. APK Mod files are usually not available on official app stores, but on third-party websites or platforms.
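To make "modified version of an APK file" concrete: an APK is essentially a ZIP-style package holding the app's code, resources, and manifest, which is what modders unpack and repack. The snippet below is only an illustrative sketch; the file name is a placeholder, not a real download.

```python
import zipfile

# An APK is a ZIP archive; listing its entries shows what a mod actually changes.
# "example-mod.apk" is a placeholder path used purely for illustration.
apk_path = "example-mod.apk"

with zipfile.ZipFile(apk_path) as apk:
    for info in apk.infolist()[:20]:  # print the first 20 entries
        print(f"{info.file_size:>10}  {info.filename}")
```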

      -


      -

      Benefits of using APK Mod

      -

      Some of the benefits of using APK Mod are:

      -
        -
• You can access premium features or content that are otherwise locked or paid in the original app.
• You can bypass some restrictions or limitations that are imposed by the original app developer or publisher.
• You can enhance your gaming experience with more options and possibilities that are not available in the original app.
      -

      Risks of using APK Mod

      -

      Some of the risks of using APK Mod are:

      -
        -
• You may violate the terms and conditions of the original app developer or publisher, which may result in legal actions or bans.
• You may expose your device to malware or viruses that may harm your data or system.
• You may lose your progress or data if the modded app is not compatible with the original app version or server.
• You may face ethical or moral issues if the modded app infringes the rights or interests of the original app developer or publisher.
      -

      How to download APK Mod Simulator Bus Indonesia

      -

      If you want to download and install the APK mod version of Simulator Bus Indonesia, you need to follow these steps:

      -

      Steps to download and install the mod

      -
        -
1. Find a reliable and trustworthy website or platform that offers the APK mod file of Simulator Bus Indonesia. You can search online or ask for recommendations from other players. Some of the popular websites that provide APK mod files are APKPure, APKMody, Rexdl, etc.
2. Download the APK mod file from the website or platform. Make sure you have enough storage space on your device and a stable internet connection. You may also need to enable the option of "Unknown sources" in your device settings to allow the installation of apps from sources other than Google Play Store.
3. Locate the downloaded APK mod file on your device and tap on it to start the installation process. Follow the instructions on the screen and grant the necessary permissions to the app.
4. Wait for the installation to complete and then launch the app from your device menu. You may need to verify your account or log in with your credentials if required.
5. Enjoy the modded version of Simulator Bus Indonesia with more features and options.
      -

      Tips to enjoy the modded game

      -

      Some of the tips to enjoy the modded version of Simulator Bus Indonesia are:

      -
        -
• Make sure you have a compatible device and operating system that can run the modded app smoothly and without errors.
• Check for updates or patches regularly to ensure that your modded app is up to date and working properly.
• Be careful when using online multiplayer features or connecting with other players, as you may encounter hackers or cheaters who may ruin your gaming experience.
• Do not use the modded app for illegal or unethical purposes, such as hacking, cheating, or stealing data from other players or servers.
• Respect the original app developer and publisher and support them if you like their work. You can also buy the original app from Google Play Store or other sources if you want to support them financially.
      -

      Conclusion

      -

      In conclusion, Simulator Bus Indonesia is a fun and realistic bus driving game that lets you explore various Indonesian cities and places. You can also download an APK mod version of the game that gives you more features and options, such as unlimited money, unlocked buses, custom liveries, etc. However, you need to be careful when downloading and installing the APK mod file, as it may come with some risks and challenges. You also need to follow some tips to enjoy the modded game safely and ethically. We hope this article has helped you learn how to download APK mod Simulator Bus Indonesia and have a great time playing it.

      -

      FAQs

      -

      Here are some frequently asked questions about Simulator Bus Indonesia and its APK mod version:

      -

      Q: Is Simulator Bus Indonesia free?

      -

      A: Yes, Simulator Bus Indonesia is free to download and play from Google Play Store or other sources. However, it may contain some in-app purchases or ads that require real money.

      -

      Q: Is APK Mod Simulator Bus Indonesia safe?

      -

A: It depends on where you download it from and how you use it. Some websites or platforms provide safe and secure APK mod files, while others may contain malware or viruses that can harm your device or data. You also need to be careful when using online multiplayer features or connecting with other players, as you may encounter hackers or cheaters who may ruin your gaming experience. Finally, respect the original app developer and publisher and do not use the modded app for illegal or unethical purposes.
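One practical precaution implied by this answer: if the source you download from also publishes a SHA-256 checksum, compare it against the file you actually received before installing. Below is a minimal Python sketch; the file name and expected hash are placeholders, not values taken from any real site.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the SHA-256 hex digest of a file, read in 1 MiB chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Placeholder values for illustration only.
downloaded_file = "bussid-mod.apk"
expected_sha256 = "0" * 64  # replace with the checksum published by the source

actual = sha256_of(downloaded_file)
print("checksum matches" if actual == expected_sha256 else f"MISMATCH: {actual}")
```

If the digests differ, the file is not the one the publisher described and should not be installed.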

      -

      Q: How can I update APK Mod Simulator Bus Indonesia?

      -

A: You can update APK Mod Simulator Bus Indonesia by downloading and installing the latest version of the APK mod file from the same website or platform where you got it. You may also need to uninstall the previous version of the app before installing the new one. However, some updates or patches may not be compatible with your modded app, so check before updating.

      -

      Q: How can I uninstall APK Mod Simulator Bus Indonesia?

      -

      A: You can uninstall APK Mod Simulator Bus Indonesia by following these steps:

      -
        -
1. Go to your device settings and find the option of "Apps" or "Applications".
2. Find and tap on "Simulator Bus Indonesia" or "Simulator Bus Indonesia Mod" from the list of apps.
3. Tap on the option of "Uninstall" and confirm your action.
4. Wait for the uninstallation to complete and then check your device menu to make sure the app is gone.
      -

      Q: Where can I get more information or help about Simulator Bus Indonesia or its APK mod version?

      -

      A: You can get more information or help about Simulator Bus Indonesia or its APK mod version from these sources:

      -
        -
• The official website of the game developer, Maleo, at [https://maleo.id/].
• The official Facebook page of the game, at [https://www.facebook.com/BUSSIDGAME/].
• The official Instagram account of the game, at [https://www.instagram.com/bussimulatorid/].
• The official YouTube channel of the game, at [https://www.youtube.com/channel/UCdpdj7ZGO8i3cLyt_0MpkAg].
• The online forums or communities of the game, such as [https://bussidgame.com/] or [https://www.reddit.com/r/BUSSID/].

      -
      -
      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Disney Heroes Battle Mode - Collect Upgrade and Battle with 100 Disney and Pixar Heroes.md b/spaces/fatiXbelha/sd/Disney Heroes Battle Mode - Collect Upgrade and Battle with 100 Disney and Pixar Heroes.md deleted file mode 100644 index 32301d1c274c8d3119bc7b24921e914a751c0804..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Disney Heroes Battle Mode - Collect Upgrade and Battle with 100 Disney and Pixar Heroes.md +++ /dev/null @@ -1,73 +0,0 @@ - -

      Disney Heroes Battle Mode Download: How to Play This Epic RPG Game on Your Device


      Introduction

      Do you love Disney and Pixar movies? Do you enjoy playing strategy RPG games? If you answered yes to both questions, then you will love Disney Heroes Battle Mode. This is a game that lets you collect and battle with over 100 Disney and Pixar heroes from your favorite films. You can team up with other players for missions and campaigns, upgrade your characters with abilities and gear, and explore a new digital world that is under attack by a virus.

      -

      disney heroes battle mode download


      Download 🌟 https://urllie.com/2uNw78



      But how can you download and play this game on your device? In this article, we will show you how to do that in a few simple steps. We will also give you some tips and tricks on how to get started with this game and have fun with your favorite Disney and Pixar heroes.


      What is Disney Heroes Battle Mode?

      Disney Heroes Battle Mode is a free-to-play action-packed strategy RPG game that was released in 2018 by PerBlue Entertainment. The game features characters from various Disney and Pixar films, such as The Incredibles, Wreck-It Ralph, Toy Story, Zootopia, Monsters Inc., The Nightmare Before Christmas, The Lion King, Pirates of the Caribbean, Frozen, Moana, Big Hero 6, Wall-E, Aladdin, Beauty and the Beast, Alice in Wonderland, Mulan, Hercules, Stitch, Mickey Mouse, Timon & Pumbaa, Captain Jack Sparrow, Merlin, Maleficent, Scar, Elsa, Buzz Lightyear, and many more.

      The game's story revolves around Wreck-It Ralph and Vanellope von Schweetz who find a beta key to unlock a mysterious new RPG game inside a massive digital city. However, they soon discover that the city is corrupted by an evil virus that turns everything into enemies. They also meet other heroes who are pulled into the city and need their help to fight back against the virus.

      The game allows you to create your own team of heroes from different films and genres. You can customize their skills and equipment to suit your playstyle and strategy. You can also join forces with other players for cooperative missions and special campaigns. You can also compete with other players in the arena and climb the leaderboards.


      How to Download Disney Heroes Battle Mode on Your Device


      For Android Devices

      If you have an Android device (such as a smartphone or tablet), you can download Disney Heroes Battle Mode from Google Play Store. Here are the steps to follow:

1. Open Google Play Store on your device
2. Search for Disney Heroes Battle Mode in the search bar
3. Tap on the game icon and then tap on Install
4. Wait for the game to download and install on your device
5. Open the game and enjoy playing with your favorite Disney and Pixar heroes

      Note: The game requires Android 5.0 or higher and at least 1 GB of RAM to run smoothly. The game also requires an internet connection to play.

      -

      disney heroes battle mode pc game
      -how to play disney heroes battle mode on mac
      -disney heroes battle mode apk mod
      -disney heroes battle mode ios app
      -disney heroes battle mode best characters
      -disney heroes battle mode tips and tricks
      -disney heroes battle mode hack no survey
      -disney heroes battle mode latest update
      -disney heroes battle mode online free
      -disney heroes battle mode cheats codes
      -disney heroes battle mode tier list 2023
      -disney heroes battle mode guild war rewards
      -disney heroes battle mode reddit community
      -disney heroes battle mode support email
      -disney heroes battle mode wiki fandom
      -disney heroes battle mode frozen team
      -disney heroes battle mode incredibles team
      -disney heroes battle mode toy story team
      -disney heroes battle mode zootopia team
      -disney heroes battle mode wreck it ralph team
      -disney heroes battle mode alice in wonderland team
      -disney heroes battle mode pirates of the caribbean team
      -disney heroes battle mode beauty and the beast team
      -disney heroes battle mode lion king team
      -disney heroes battle mode nightmare before christmas team
      -disney heroes battle mode tron legacy team
      -disney heroes battle mode aladdin team
      -disney heroes battle mode mulan team
      -disney heroes battle mode moana team
      -disney heroes battle mode big hero 6 team
      -disney heroes battle mode wall-e team
      -disney heroes battle mode robin hood team
      -disney heroes battle mode hercules team
      -disney heroes battle mode merlin team
      -disney heroes battle mode mickey mouse team
      -disney heroes battle mode stitch team
      -disney heroes battle mode buzz lightyear team
      -disney heroes battle mode elsa team
      -disney heroes battle mode scar team
      -disney heroes battle mode maleficent team
      -disney heroes battle mode jack sparrow team
      -disney heroes battle mode woody team
      -disney heroes battle mode timon and pumbaa team
      -disney heroes battle mode baymax team
      -disney heroes battle mode eve team
      -disney heroes battle mode mr incredible team
      -disney heroes battle mode elastigirl team
      -disney heroes battle mode chief bogo team


      For iOS Devices

      If you have an iOS device (such as an iPhone or iPad), you can download Disney Heroes Battle Mode from App Store. Here are the steps to follow:

1. Open App Store on your device
2. Search for Disney Heroes Battle Mode in the search bar
3. Tap on the game icon and then tap on Get
4. Wait for the game to download and install on your device
5. Open the game and enjoy playing with your favorite Disney and Pixar heroes

      Note: The game requires iOS 9.0 or later and at least 1 GB of RAM to run smoothly. The game also requires an internet connection to play.


      How to Choose the Best Characters for Your Team

      Now that you know how to download and play Disney Heroes Battle Mode on your device, you might be wondering how to choose the best characters for your team. After all, there are over 100 heroes to collect and unlock, each with their own skills, roles, and synergies. How can you decide which ones to use and which ones to bench?

      Well, there are a few factors to consider when choosing your team, such as:

      • The roles of the characters: As we mentioned before, there are four main roles in the game: Tank, Damage, Support, and Control. Each role has a different function and contribution to the team. Tanks are the frontline heroes who can absorb damage and protect their allies. Damage heroes are the ones who deal the most damage and eliminate enemies. Support heroes are the ones who heal, buff, or shield their allies. Control heroes are the ones who debuff, stun, or manipulate enemies. Ideally, you want to have a balanced team that covers all these roles.
• The tiers of the characters: Not all characters are created equal in Disney Heroes Battle Mode. Some are more powerful, versatile, or useful than others. That's why there are tier lists that rank the characters from best to worst based on their performance and popularity in the game. You can find many community-maintained tier lists online. Of course, tier lists are not absolute and may vary depending on your preferences and playstyle. But they can give you a general idea of which characters are worth investing in and which ones are not.
      • The synergies of the characters: Another important factor to consider when choosing your team is how well the characters work together. Some characters have skills that complement or enhance each other, creating powerful combos and effects. For example, Elsa and Olaf have skills that freeze enemies, making them vulnerable to extra damage from other frozen heroes. Or Stitch and Hiro have skills that sap enemies, reducing their energy gain and making them easier to control. You can find out which characters have synergies by checking their friendship disks or their collection buffs.
      -

      By taking into account these factors, you can create a strong and effective team that can handle any challenge in Disney Heroes Battle Mode.


      How to Play Disney Heroes Battle Mode Like a Pro

      Now that you have downloaded the game and chosen your team, you might be wondering how to play Disney Heroes Battle Mode like a pro. Well, there are some tips and tricks that can help you improve your skills and strategies in this game. Here are some of them:

      • Learn the basics of combat: The combat in Disney Heroes Battle Mode is mostly automatic, meaning that your heroes will attack and use their skills on their own. However, you can still influence the outcome of the battle by using your heroes' white skills manually. These are the skills that have a blue bar under them, and they can be activated by tapping on the hero's portrait. White skills are usually the most powerful and impactful skills in the game, so you should use them wisely and at the right time. For example, you can use them to finish off a low-health enemy, to counter an enemy's skill, or to turn the tide of the battle in your favor.
      • Upgrade your heroes regularly: As we mentioned before, leveling up your heroes is one of the best ways to increase their power and stats. However, there are other ways to upgrade your heroes as well, such as enhancing their skills, equipping them with badges, promoting their rarity, unlocking their friendship disks, and increasing their mods. All of these methods require different resources and materials that you can obtain by playing the game and completing various tasks. You should always try to upgrade your heroes as much as possible, as this will make them stronger and more versatile in combat.
      • Explore the different game modes: Disney Heroes Battle Mode has a lot of content and features to offer, besides the main campaign mode. There are also other game modes that you can play and enjoy, such as the arena, the coliseum, the city watch, the surge, the heist, the trials, the port, the invasion, and more. Each game mode has its own rules, rewards, and challenges that will test your skills and strategies. You should try to play all of them regularly, as they will help you earn more resources, materials, and rewards that you can use to upgrade your heroes and progress faster in the game.
      • Join a guild and make friends: Disney Heroes Battle Mode is not only a solo game, but also a social game. You can join a guild and make friends with other players who share your passion for Disney and Pixar films. By joining a guild, you can access some exclusive features and benefits, such as guild chat, guild perks, guild wars, guild contests, and more. You can also cooperate with your guildmates for some game modes, such as the surge or the invasion. Moreover, you can make friends with other players by sending them friend requests or messages. You can also unlock friendship campaigns with some heroes by befriending them or their partners.
      -

      By following these tips and tricks, you can play Disney Heroes Battle Mode like a pro and have more fun with this epic RPG game.


      Conclusion

      Disney Heroes Battle Mode is a game that every Disney and Pixar fan should try. It is a game that combines the best of both worlds: the charm and nostalgia of your favorite films, and the thrill and strategy of RPG games. You can download and play this game on your Android or iOS device for free, and enjoy collecting and battling with over 100 heroes from different films and genres. You can also upgrade your heroes, explore different game modes, join a guild, and make friends with other players. Disney Heroes Battle Mode is a game that will keep you entertained and engaged for hours.

      So what are you waiting for? Download Disney Heroes Battle Mode today and join the epic adventure with your favorite Disney and Pixar heroes!


      FAQs

      Here are some frequently asked questions about Disney Heroes Battle Mode:

1. How can I get more heroes in Disney Heroes Battle Mode?
  You can get more heroes in Disney Heroes Battle Mode by opening crates, completing missions, participating in events, or buying them with in-game currency. You can also unlock some heroes by completing their friendship campaigns.
2. How can I get more resources and materials in Disney Heroes Battle Mode?
  You can get more resources and materials in Disney Heroes Battle Mode by playing the game regularly, completing various tasks, winning battles, opening rewards, or buying them with real money.
3. How can I contact the support team of Disney Heroes Battle Mode?
  You can contact the support team of Disney Heroes Battle Mode by tapping on the settings icon on the main screen, then tapping on support, and then tapping on contact us. You can also visit their official website or forum for more information.
4. Is Disney Heroes Battle Mode safe for kids?
  Disney Heroes Battle Mode is rated 9+ on App Store and Everyone 10+ on Google Play Store. The game contains cartoon violence, mild language, and in-app purchases. Parents should supervise their kids when playing this game and set parental controls if needed.
5. Can I play Disney Heroes Battle Mode offline?
  No, you cannot play Disney Heroes Battle Mode offline. The game requires an internet connection to play.

      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download 60 Second Atomic Adventure APK OBB Gratis A Survival Game with a Twist.md b/spaces/fatiXbelha/sd/Download 60 Second Atomic Adventure APK OBB Gratis A Survival Game with a Twist.md deleted file mode 100644 index 332b4ed2ec17142eea340378ad23241de36cd045..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download 60 Second Atomic Adventure APK OBB Gratis A Survival Game with a Twist.md +++ /dev/null @@ -1,96 +0,0 @@ - -

      Download 60 Second Atomic Adventure APK OBB Gratis: A Guide

      -

      If you are looking for a fun and challenging game that will test your survival skills in a post-apocalyptic scenario, then you might want to try 60 Second Atomic Adventure. This is a game that will make you think fast, act smart, and deal with the consequences of your choices. But what if you don't want to pay for the game or deal with annoying ads and in-app purchases? Is there a way to download 60 Second Atomic Adventure APK OBB gratis? In this article, we will answer these questions and more. We will tell you what 60 Second Atomic Adventure is, why you might want to download it for free, and how to do it safely and easily.

      -

      download 60 second atomic adventure apk obb gratis


      Download Zip ✺✺✺ https://urllie.com/2uNBIQ



      -

      What is 60 Second Atomic Adventure?

      -

      60 Second Atomic Adventure is a dark comedy adventure game developed by Robot Gentleman. It was released in 2015 for Windows and Mac, and later ported to iOS and Android devices. The game is set in a fictional 1950s America, where a nuclear war is about to break out. You play as Ted, a family man who has only 60 seconds to gather his family and some essential items before rushing to the fallout shelter in his backyard. Once there, you have to survive as long as possible with limited resources and random events. The game has multiple endings depending on your actions and choices.

      -

      A brief introduction to the game and its features

      -

      60 Second Atomic Adventure is a game that combines elements of strategy, simulation, and roguelike genres. It has four main features:

      -
        -
• A unique art style that blends pixel art with hand-drawn illustrations
• A dynamic soundtrack that adapts to the situation and mood of the game
• A humorous and satirical tone that mocks the Cold War era and its propaganda
• Replayable gameplay that offers different scenarios and outcomes every time

      How to play 60 Second Atomic Adventure

      -

      The game has four different modes that you can choose from:

      -

      The main mode: Atomic Drill

      -

      This is the default mode of the game, where you have to complete two phases: scavenge and survive. In the scavenge phase, you have 60 seconds to grab as many items as you can from your house before heading to the shelter. You have to decide what items are essential for your survival, such as food, water, medicine, weapons, tools, etc. You also have to choose which family members you want to take with you: your wife Dolores, your son Timmy, your daughter Mary Jane, or your pet (a dog or a cat). You can also find some special items that can unlock new events or endings in the game.

      -

      60 second atomic adventure apk obb free download
      -download 60 seconds! atomic adventure full apk + obb
      -60 second atomic adventure mod apk obb download gratis
      -how to download 60 second atomic adventure apk obb for free
      -60 second atomic adventure apk + obb latest version download
      -download 60 second atomic adventure android apk obb gratis
      -60 second atomic adventure apk obb offline download free
      -60 second atomic adventure apk obb unlimited money download
      -download 60 second atomic adventure apk + obb data gratis
      -60 second atomic adventure apk obb highly compressed download
      -60 second atomic adventure premium apk obb free download
      -download 60 seconds! atomic adventure 1.3.107 apk full + obb
      -60 second atomic adventure cracked apk obb download gratis
      -where to download 60 second atomic adventure apk obb for free
      -60 second atomic adventure apk + obb file download free
      -download 60 second atomic adventure modded apk obb gratis
      -60 second atomic adventure hack apk obb download free
      -download 60 seconds! atomic adventure android game apk + obb
      -60 second atomic adventure full unlocked apk obb download
      -download 60 second atomic adventure survival game apk obb gratis
      -60 second atomic adventure pro apk obb free download
      -download 60 seconds! atomic adventure hd apk + obb data
      -60 second atomic adventure cheat apk obb download gratis
      -how to install 60 second atomic adventure apk obb for free
      -60 second atomic adventure apk + obb mega download free
      -download 60 second atomic adventure paid apk obb gratis
      -60 second atomic adventure patch apk obb download free
      -download 60 seconds! atomic adventure mod apk + obb unlimited everything
      -60 second atomic adventure unlocked apk obb download gratis
      -how to play 60 second atomic adventure apk obb for free
      -60 second atomic adventure apk + obb mediafire download free
      -download 60 second atomic adventure original apk obb gratis
      -60 second atomic adventure update apk obb download free
      -download 60 seconds! atomic adventure pc game apk + obb emulator
      -60 second atomic adventure no ads apk obb download gratis
      -how to fix 60 second atomic adventure apk obb for free
      -60 second atomic adventure apk + obb google drive download free
      -download 60 second atomic adventure new version apk obb gratis
      -60 second atomic adventure tips and tricks apk obb download free
      -download 60 seconds! atomic adventure gameplay video apk + obb youtube

      -

In the survive phase, you have to manage your resources and make decisions that will affect your fate. You have to ration your food and water, deal with injuries and illnesses, and fend off raiders and other threats from the wasteland. The game is also light to run: you can play it on most Android devices, since it does not have high system requirements or specifications, and it runs smoothly and easily on your phone or tablet.

      -

      The risks of downloading the game for free

      -

      However, downloading 60 Second Atomic Adventure APK OBB gratis also comes with some risks that you should be aware of, such as:

      -

      Potential malware, viruses, or spyware

      -

      One of the dangers of downloading the game for free is that you might expose your device to malware, viruses, or spyware. These are malicious programs that can harm your device, steal your data, or compromise your security. You might download a corrupted or infected file that can damage your device or cause it to malfunction.

      -

      Legal issues or copyright infringement

      -

      Another risk of downloading the game for free is that you might violate the law or the rights of the developers. 60 Second Atomic Adventure is a copyrighted game that belongs to Robot Gentleman. By downloading it for free, you are breaking the terms and conditions of the game and the Google Play Store. You might face legal consequences or penalties for piracy or theft.

      -

      Poor performance or compatibility issues

      -

      A final risk of downloading the game for free is that you might experience poor performance or compatibility issues. The game might not work properly or at all on your device. You might encounter bugs, glitches, crashes, or errors. The game might not be compatible with your device model, operating system, or software version.

      -

      How to download 60 Second Atomic Adventure APK OBB gratis?

      -

      If you still want to download 60 Second Atomic Adventure APK OBB gratis, despite the risks, you need to follow some steps to do it safely and easily. Here are the steps you need to take:

      -

      The steps to download and install the game for free

      -

      To download and install 60 Second Atomic Adventure APK OBB gratis, you need to do the following:

      -

      Find a reliable source for the APK OBB files

      -

      The first step is to find a reliable source for the APK OBB files of the game. These are the files that contain the game data and installation package. You need to find a website that offers these files for free and without any viruses or malware. You can search online for reviews, ratings, or feedback from other users who have downloaded the files before. You can also use a trusted antivirus program to scan the files before downloading them.
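If the site you settle on publishes a checksum for its files (an assumption, since many mirrors do not), you can add one more check on top of the antivirus scan by hashing the downloaded file yourself and comparing the result. The short Python sketch below only illustrates the idea; the file name and the expected value are placeholders, not real data for this game.

# Minimal sketch: verify a downloaded file against a published SHA-256 checksum.
# "60-seconds.apk" and the expected value below are placeholders for illustration only.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "paste-the-checksum-published-by-the-site-here"
print("checksum matches:", sha256_of("60-seconds.apk") == expected)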

      -

      Enable unknown sources on your device settings

      -

The second step is to enable unknown sources in your device settings. By default, Android blocks installs from sources other than the Google Play Store, so you need to relax this restriction temporarily to install the APK file of the game. To do this, go to your device settings, then security, then unknown sources, and toggle it on. You can toggle it back off once the game is installed.

      -

      Download and install the APK file first, then the OBB file second

      -

      The third step is to download and install the APK file first, then the OBB file second. The APK file is the installation package of the game, while the OBB file is the data file of the game. You need to download both files and save them in your device storage. Then, you need to install the APK file by tapping on it and following the instructions. After that, you need to copy and paste the OBB file into the Android/OBB folder in your device storage.
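If you prefer to do this step from a computer instead of on the phone, the same install-then-copy sequence can be scripted over USB. The sketch below is only an illustration in Python calling the standard adb tool; it assumes adb is installed and USB debugging is enabled, and the file names and the OBB folder (which depends on the game's actual package name) are placeholders.

# Illustrative sketch, not an official installer: push the APK and OBB with adb.
# Assumes adb is on the PATH and USB debugging is enabled; names below are placeholders.
import subprocess

APK_FILE = "60-seconds.apk"
OBB_FILE = "main.100.com.example.sixtyseconds.obb"
OBB_DIR = "/sdcard/Android/obb/com.example.sixtyseconds/"

subprocess.run(["adb", "install", APK_FILE], check=True)              # install the APK first
subprocess.run(["adb", "shell", "mkdir", "-p", OBB_DIR], check=True)  # make sure the OBB folder exists
subprocess.run(["adb", "push", OBB_FILE, OBB_DIR], check=True)        # then copy the OBB data file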

      -

      Launch the game and enjoy

      -

      The final step is to launch the game and enjoy. You can find the game icon on your home screen or app drawer. Tap on it and start playing 60 Second Atomic Adventure for free.

      -

      Conclusion

      -

      60 Second Atomic Adventure is a fun and challenging game that will test your survival skills in a post-apocalyptic scenario. You can download it for free by following some steps, but you should also be aware of some risks. If you want to play 60 Second Atomic Adventure APK OBB gratis, make sure you do it safely and legally.

      -

      Frequently Asked Questions

      -
        -
• What are the minimum requirements to play 60 Second Atomic Adventure on Android?
  The minimum requirements to play 60 Second Atomic Adventure on Android are: Android 4.1 or higher, 1 GB of RAM, and 200 MB of free space.
• What are some tips and tricks to play 60 Second Atomic Adventure?
  Some tips and tricks to play 60 Second Atomic Adventure are: plan ahead before scavenging, prioritize food and water over other items, use items wisely and sparingly, explore the wasteland carefully and cautiously, and try different endings and modes.
• What are some alternatives to 60 Second Atomic Adventure?
  Some alternatives to 60 Second Atomic Adventure are: Fallout Shelter, This War of Mine, Sheltered, and 60 Parsecs.
• Is 60 Second Atomic Adventure available for other platforms?
  Yes, 60 Second Atomic Adventure is also available for Windows, Mac, iOS, and Nintendo Switch.
• Is 60 Second Atomic Adventure a multiplayer game?
  No, 60 Second Atomic Adventure is a single-player game. However, you can share your screenshots and stories with other players online.

      \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Love by Ali Jita - A Song Produced by the Intelligent Beat Maker Don Adah.md b/spaces/fatiXbelha/sd/Download Love by Ali Jita - A Song Produced by the Intelligent Beat Maker Don Adah.md deleted file mode 100644 index 2a9ab293d777270be506641e92ed92d42336f03d..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Love by Ali Jita - A Song Produced by the Intelligent Beat Maker Don Adah.md +++ /dev/null @@ -1,117 +0,0 @@ - -

      How to Download Love by Ali Jita

      -

      If you are a fan of Hausa music, you might have heard of love by ali jita, a romantic song by one of the most popular Hausa musicians in Nigeria. The song was released in 2018 and has been ranked by BBC Hausa as the second-best Hausa music of that year. The song has also been used in several Kannywood movies, making it even more popular among Hausa speakers and lovers.

      -

      Downloading music online is a convenient and affordable way to enjoy your favorite songs anytime and anywhere. You can also access a wide variety of music genres and artists from different sources and platforms. However, downloading music online also comes with some challenges, such as finding reliable and legal websites, avoiding malware and viruses, and respecting the rights of the artists and creators.

      -

      download love by ali jita


      Download Zip >> https://urllie.com/2uNAXu



      -

      In this article, we will show you how to download love by ali jita from three different sources: iTunes, YouTube, and other websites. We will also provide you with some tips and warnings for downloading music online safely and legally. Follow these simple steps and enjoy listening to love by ali jita on your device.

      -

      How to Download Love by Ali Jita from iTunes

      -

      iTunes is one of the most popular and trusted platforms for buying and downloading music online. You can use iTunes on your computer or smartphone to access millions of songs from various artists and genres. Here is how you can download love by ali jita from iTunes:

      -
        -
1. Install iTunes on your computer or smartphone. If you are using a Windows computer, you will need to download and install iTunes from Apple's website. If you are using a Mac computer or an iPhone, iTunes will be installed by default.
2. Sign in with your Apple ID and enter your payment information. If you don't have an Apple ID, you will need to create one on Apple's website. You will also need to enter your credit card or PayPal details to be able to buy music from iTunes.
3. Search for love by ali jita on the iTunes store and click the price to buy it. You can use the search bar at the top-right corner of iTunes to type in the name of the song, album, or artist. You can also browse through different categories and genres of music available on iTunes. Once you find love by ali jita, you will see the price of the song next to it. Click on the price to buy the song. You can also preview the song by clicking on the play button before buying it.
4. Enter your Apple ID password or use Touch ID to confirm your purchase. You will need to enter your Apple ID password or use your fingerprint to verify your identity and authorize the payment. You will also receive a confirmation email from iTunes with the details of your purchase.
5. View the music file on your device or transfer it to another location or service. After buying the song, you can view it on your device's music library or iTunes library. You can also sync it with other devices or services, such as iCloud, Spotify, or Google Play Music.
      -

      Congratulations, you have successfully downloaded love by ali jita from iTunes. Enjoy listening to the song and share it with your friends and family.

      -

      How to Download Love by Ali Jita from YouTube

      -

      YouTube is another popular and convenient platform for watching and listening to music online. You can find almost any song or video on YouTube, including love by ali jita official video. However, YouTube does not allow you to download music directly from its website or app. You will need to use a third-party website or app that allows you to convert YouTube videos into MP3 files and download them to your device. Here is how you can download love by ali jita from YouTube:

      -
        -
1. Visit YouTube and search for love by ali jita official video. You can use the search bar at the top of YouTube to type in the name of the song, album, or artist. You can also browse through different categories and genres of music available on YouTube.
2. Copy the URL of the video from the address bar or the share button. Once you find love by ali jita official video, you will see the URL of the video on the address bar of your browser or on the share button below the video. Copy the URL by right-clicking on it and selecting copy or by tapping and holding on it and selecting copy.
3. Visit a third-party website or app that allows you to download YouTube videos as MP3 files, such as ytmp3.cc or vidmate. There are many websites and apps that offer this service for free or for a fee. Some of them are ytmp3.cc, vidmate, y2mate, savefrom.net, and 4kdownload. You can use any of them as long as they are reliable and legal (a scriptable alternative is sketched at the end of this section).
4. Paste the URL of the video into the search box and click convert or download. Once you visit the website or app of your choice, you will see a search box where you can paste the URL of the video that you copied from YouTube. Paste the URL by right-clicking on it and selecting paste or by tapping and holding on it and selecting paste. Then, click on the convert or download button to start the process.
5. Save the MP3 file to your device or transfer it to another location or service. After converting or downloading the video, you will see a link or a button to save the MP3 file to your device. Click on it and choose a location where you want to save it. You can also transfer it to another device or service, such as Dropbox, Google Drive, or SoundCloud.
      -

      Congratulations, you have successfully downloaded love by ali jita from YouTube. Enjoy listening to the song and share it with your friends and family.
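Most converter websites automate the same conversion you could run yourself. If you are comfortable with Python, one scriptable alternative (our assumption, not something the steps above require) is the open-source yt-dlp library together with ffmpeg; the sketch below saves the audio track of a video as an MP3 file. The URL is a placeholder, and the same legal cautions apply: only download music you have the right to keep.

# Illustrative sketch using the yt-dlp library (pip install yt-dlp) plus ffmpeg.
# The URL below is a placeholder; replace it with a video you are allowed to download.
import yt_dlp

options = {
    "format": "bestaudio/best",
    "outtmpl": "%(title)s.%(ext)s",          # name the file after the video title
    "postprocessors": [{
        "key": "FFmpegExtractAudio",         # needs ffmpeg installed on the system
        "preferredcodec": "mp3",
        "preferredquality": "192",
    }],
}

with yt_dlp.YoutubeDL(options) as downloader:
    downloader.download(["https://www.youtube.com/watch?v=PLACEHOLDER"])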

      -

      download love by ali jita mp3
      -download love by ali jita video
      -download love by ali jita lyrics
      -download love by ali jita cover
      -download love by ali jita remix
      -download love by ali jita audio
      -download love by ali jita song
      -download love by ali jita album
      -download love by ali jita shazam
      -download love by ali jita youtube
      -download love by ali jita instrumental
      -download love by ali jita ringtone
      -download love by ali jita karaoke
      -download love by ali jita music
      -download love by ali jita hausa song
      -download love by ali jita free mp3
      -download love by ali jita official video
      -download love by ali jita 320kbps
      -download love by ali jita online
      -download love by ali jita latest song
      -download love by ali jita ft hamisu breaker
      -download love by ali jita live performance
      -download love by ali jita dj mix
      -download love by ali jita whatsapp status
      -download love by ali jita mp4 video
      -download love by ali jita produced by drimzbeat
      -download love by ali jita full song
      -download love by ali jita single
      -download love by ali jita arewa angel
      -download love by ali jita mai waka
      -download love by ali jita mata
      -download love by ali jita asha ruwa remix
      -download love by ali jita aure yana da dadi
      -download love by ali jita tambura
      -download love by ali jita super star
      -download love by ali jita halimatu sadiya
      -download love by ali jita gimbiya aisha
      -download love by ali jita call on me
      -download love by ali jita new song 2023
      -download love by ali jita best of 2023 album

      -

      How to Download Love by Ali Jita from Other Sources

      -

      If you are looking for other sources to download love by ali jita, you can also visit other websites that offer free or paid music downloads. These websites may have a larger or more diverse collection of music than iTunes or YouTube. However, they may also have some drawbacks, such as lower quality, limited availability, or legal issues. Here is how you can download love by ali jita from other sources:

      -
        -
1. Visit other websites that offer free or paid music downloads, such as Bandcamp, DatPiff, Free Music Archive, or The Internet Archive. Other sites that offer this kind of download for various genres and artists include Jamendo, SoundClick, and Audiomack. You can use any of them as long as they are reliable and legal.
2. Search for love by ali jita or browse through the categories and genres of music available. Once you visit the website of your choice, you can use the search bar to type in the name of the song, album, or artist. You can also browse through different categories and genres of music available on the website.
3. Click on the download button or link and choose the format and quality of the music file. Once you find love by ali jita, you will see a download button or link next to it. Click on it and choose the format and quality of the music file that you want to download. Some websites may offer different formats, such as MP3, WAV, FLAC, or OGG. Some websites may also offer different quality levels, such as 128 kbps, 256 kbps, or 320 kbps.
4. Save the music file to your device or transfer it to another location or service. After downloading the music file, you will see a link or a button to save it to your device. Click on it and choose a location where you want to save it. You can also transfer it to another device or service, such as Dropbox, Google Drive, or SoundCloud.
      -

      Congratulations, you have successfully downloaded love by ali jita from other sources. Enjoy listening to the song and share it with your friends and family.

      -

      Conclusion

      -

      In this article, we have shown you how to download love by ali jita from three different sources: iTunes, YouTube, and other websites. We have also provided you with some tips and warnings for downloading music online safely and legally. Downloading music online is a convenient and affordable way to enjoy your favorite songs anytime and anywhere. However, you should also be aware of the potential risks and challenges that come with it.

      -

      Here are some tips and warnings for downloading music online:

      -
        -
      • Always check the reliability and legality of the website or app that you are using to download music. Avoid websites or apps that are suspicious, unknown, or have negative reviews.
      • -
      • Always respect the rights of the artists and creators of the music that you are downloading. Do not download music that is protected by copyright or that requires permission from the artist or creator.
      • -
      • Always scan the music file that you are downloading for malware or viruses. Use a reputable antivirus software or app to protect your device and data from potential harm.
      • -
      • Always backup your music files to another location or service. Use a cloud storage service or an external hard drive to store your music files in case of loss or damage.
      • -
      • Always enjoy your music responsibly and ethically. Do not use your music for illegal or immoral purposes. Do not share your music with others without their consent or knowledge.
      • -
      -

      We hope that this article has helped you learn how to download love by ali jita from different sources. We also hope that you have enjoyed listening to the song and that you have discovered other Hausa songs and artists that you like. If you have any feedback or suggestions for other music downloads, please let us know in the comments below.

      -

      Frequently Asked Questions

      -

      Here are some frequently asked questions about downloading love by ali jita:

      -
        -
1. What is love by ali jita?
  Love by ali jita is a romantic song by Ali Jita, one of the most popular Hausa musicians in Nigeria. The song was released in 2018 and has been ranked by BBC Hausa as the second-best Hausa music of that year. The song has also been used in several Kannywood movies, making it even more popular among Hausa speakers and lovers.
2. Who is Ali Jita?
  Ali Jita is a Nigerian singer, songwriter, producer, and actor who specializes in Hausa music. He was born in Kano State in 1983 and started his musical career in 2005. He has released several albums and singles, such as Mata Ku Dau Turame, Love, Soyayya Dadi, Kece Flavour, and Hafeez. He has also won several awards and recognition for his work, such as Arewa Music Award, City People Music Award, MTN Music Award, and Kannywood Award.
3. What is Hausa music?
  Hausa music is a type of music that originates from the Hausa people of northern Nigeria and southern Niger. Hausa music is influenced by Arabic, Islamic, African, and Western musical traditions. Hausa music is characterized by its use of instruments such as drums, flutes, guitars, keyboards, violins, trumpets, saxophones, and synthesizers. Hausa music is also known for its use of lyrics that express love, praise, religion, culture, history, politics, and social issues.
4. What are some other popular Hausa songs and artists?
  Some other popular Hausa songs and artists are:
  • Mata Ku Dau Turame by Nura M Inuwa
  • Soyayya Dadi by Umar M Shareef
  • Kece Flavour by Ali Jita
  • Hafeez by Ali Jita
  • Mai Zamani by Nura M Inuwa
  • Yarinya by Classiq
  • Zan Rayu Dake by Morell
  • Mun Gode by Solomon Lange
  • Babban Rana by Ibrahim Yala
  • Yar Fulani by Umar M Shareef
  • Inda Rai by 2Baba
5. How can I download music online safely and legally?
  To download music online safely and legally, you should follow these tips and warnings:
  • Always check the reliability and legality of the website or app that you are using to download music. Avoid websites or apps that are suspicious, unknown, or have negative reviews.
  • Always respect the rights of the artists and creators of the music that you are downloading. Do not download music that is protected by copyright or that requires permission from the artist or creator.
  • Always scan the music file that you are downloading for malware or viruses. Use a reputable antivirus software or app to protect your device and data from potential harm.
  • Always back up your music files to another location or service. Use a cloud storage service or an external hard drive to store your music files in case of loss or damage.
  • Always enjoy your music responsibly and ethically. Do not use your music for illegal or immoral purposes. Do not share your music with others without their consent or knowledge.

      \ No newline at end of file diff --git a/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/prod_cons.h deleted file mode 100644 index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000 --- a/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/prod_cons.h +++ /dev/null @@ -1,433 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include - -#include "libipc/def.h" - -#include "libipc/platform/detail.h" -#include "libipc/circ/elem_def.h" -#include "libipc/utility/log.h" -#include "libipc/utility/utility.h" - -namespace ipc { - -//////////////////////////////////////////////////////////////// -/// producer-consumer implementation -//////////////////////////////////////////////////////////////// - -template -struct prod_cons_impl; - -template <> -struct prod_cons_impl> { - - template - struct elem_t { - std::aligned_storage_t data_ {}; - }; - - alignas(cache_line_size) std::atomic rd_; // read index - alignas(cache_line_size) std::atomic wt_; // write index - - constexpr circ::u2_t cursor() const noexcept { - return 0; - } - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed)); - if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) { - return false; // full - } - std::forward(f)(&(elems[cur_wt].data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - /** - * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'. - * So we could just disconnect all connections of receiver, and return false. - */ - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(~static_cast(0u)); - return false; - } - - template - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed)); - if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::forward(f)(&(elems[cur_rd].data_)); - std::forward(out)(true); - rd_.fetch_add(1, std::memory_order_release); - return true; - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - if (circ::index_of(cur_rd) == - circ::index_of(wt_.load(std::memory_order_acquire))) { - return false; // empty - } - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> - : prod_cons_impl> { - - using flag_t = std::uint64_t; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - - template - bool push(W* /*wrapper*/, F&& f, E* elems) { - circ::u2_t cur_ct, nxt_ct; - for (unsigned k = 0;;) { - cur_ct = ct_.load(std::memory_order_relaxed); - if (circ::index_of(nxt_ct = cur_ct + 1) == - circ::index_of(rd_.load(std::memory_order_acquire))) { - 
return false; // full - } - if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - auto* el = elems + circ::index_of(cur_ct); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - while (1) { - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if (cur_ct != wt_.load(std::memory_order_relaxed)) { - return true; - } - if ((~cac_ct) != cur_ct) { - return true; - } - if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) { - return true; - } - wt_.store(nxt_ct, std::memory_order_release); - cur_ct = nxt_ct; - nxt_ct = cur_ct + 1; - el = elems + circ::index_of(cur_ct); - } - return true; - } - - template - bool force_push(W* wrapper, F&&, E*) { - wrapper->elems()->disconnect_receiver(1); - return false; - } - - template class E, std::size_t DS, std::size_t AS> - bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) { - byte_t buff[DS]; - for (unsigned k = 0;;) { - auto cur_rd = rd_.load(std::memory_order_relaxed); - auto cur_wt = wt_.load(std::memory_order_acquire); - auto id_rd = circ::index_of(cur_rd); - auto id_wt = circ::index_of(cur_wt); - if (id_rd == id_wt) { - auto* el = elems + id_wt; - auto cac_ct = el->f_ct_.load(std::memory_order_acquire); - if ((~cac_ct) != cur_wt) { - return false; // empty - } - if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) { - wt_.store(cur_wt + 1, std::memory_order_release); - } - k = 0; - } - else { - std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff)); - if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) { - std::forward(f)(buff); - std::forward(out)(true); - return true; - } - ipc::yield(k); - } - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - - enum : rc_t { - ep_mask = 0x00000000ffffffffull, - ep_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - }; - - alignas(cache_line_size) std::atomic wt_; // write index - alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer - - circ::u2_t cursor() const noexcept { - return wt_.load(std::memory_order_acquire); - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & ep_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) { - return false; // has not finished yet - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - epoch_ += ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(wt_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - 
circ::cc_t rem_cc = cur_rc & ep_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) { - break; - } - ipc::yield(k); - } - std::forward(f)(&(el->data_)); - wt_.fetch_add(1, std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) { - if (cur == cursor()) return false; // acquire - auto* el = elems + circ::index_of(cur++); - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & ep_mask) == 0) { - std::forward(out)(true); - return true; - } - auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id()); - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)((nxt_rc & ep_mask) == 0); - return true; - } - ipc::yield(k); - } - } -}; - -template <> -struct prod_cons_impl> { - - using rc_t = std::uint64_t; - using flag_t = std::uint64_t; - - enum : rc_t { - rc_mask = 0x00000000ffffffffull, - ep_mask = 0x00ffffffffffffffull, - ep_incr = 0x0100000000000000ull, - ic_mask = 0xff000000ffffffffull, - ic_incr = 0x0000000100000000ull - }; - - template - struct elem_t { - std::aligned_storage_t data_ {}; - std::atomic rc_ { 0 }; // read-counter - std::atomic f_ct_ { 0 }; // commit flag - }; - - alignas(cache_line_size) std::atomic ct_; // commit index - alignas(cache_line_size) std::atomic epoch_ { 0 }; - - circ::u2_t cursor() const noexcept { - return ct_.load(std::memory_order_acquire); - } - - constexpr static rc_t inc_rc(rc_t rc) noexcept { - return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask); - } - - constexpr static rc_t inc_mask(rc_t rc) noexcept { - return inc_rc(rc) & ~rc_mask; - } - - template - bool push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.load(std::memory_order_acquire); - for (unsigned k = 0;;) { - circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_relaxed); - circ::cc_t rem_cc = cur_rc & rc_mask; - if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) { - return false; // has not finished yet - } - else if (!rem_cc) { - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if ((cur_fl != cur_ct) && cur_fl) { - return false; // full - } - } - // consider rem_cc to be 0 here - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) && - epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) { - break; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool force_push(W* wrapper, F&& f, E* elems) { - E* el; - circ::u2_t cur_ct; - rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - for (unsigned k = 0;;) { - circ::cc_t cc = 
wrapper->elems()->connections(std::memory_order_relaxed); - if (cc == 0) return false; // no reader - el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed)); - // check all consumers have finished reading this element - auto cur_rc = el->rc_.load(std::memory_order_acquire); - circ::cc_t rem_cc = cur_rc & rc_mask; - if (cc & rem_cc) { - ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc); - cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers - if (cc == 0) return false; // no reader - } - // just compare & exchange - if (el->rc_.compare_exchange_weak( - cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) { - if (epoch == epoch_.load(std::memory_order_acquire)) { - break; - } - else if (push(wrapper, std::forward(f), elems)) { - return true; - } - epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr; - } - ipc::yield(k); - } - // only one thread/process would touch here at one time - ct_.store(cur_ct + 1, std::memory_order_release); - std::forward(f)(&(el->data_)); - // set flag & try update wt - el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release); - return true; - } - - template - bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) { - auto* el = elems + circ::index_of(cur); - auto cur_fl = el->f_ct_.load(std::memory_order_acquire); - if (cur_fl != ~static_cast(cur)) { - return false; // empty - } - ++cur; - std::forward(f)(&(el->data_)); - for (unsigned k = 0;;) { - auto cur_rc = el->rc_.load(std::memory_order_acquire); - if ((cur_rc & rc_mask) == 0) { - std::forward(out)(true); - el->f_ct_.store(cur + N - 1, std::memory_order_release); - return true; - } - auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id()); - bool last_one = false; - if ((last_one = (nxt_rc & rc_mask) == 0)) { - el->f_ct_.store(cur + N - 1, std::memory_order_release); - } - if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) { - std::forward(out)(last_one); - return true; - } - ipc::yield(k); - } - } -}; - -} // namespace ipc diff --git a/spaces/fclong/summary/fengshen/models/megatron_t5/__init__.py b/spaces/fclong/summary/fengshen/models/megatron_t5/__init__.py deleted file mode 100644 index 84f78136331c5ef4975697bc6a77910bba7429bd..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/models/megatron_t5/__init__.py +++ /dev/null @@ -1,49 +0,0 @@ -# coding=utf-8 -# Copyright 2021 The IDEA Authors. All rights reserved. - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at - -# http://www.apache.org/licenses/LICENSE-2.0 - -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import TYPE_CHECKING - -from transformers.file_utils import _LazyModule, is_torch_available - - -_import_structure = { - "configuration_megatron_t5": ["T5Config"], - "tokenization_megatron_t5": ["T5Tokenizer"], -} - -if is_torch_available(): - _import_structure["modeling_megatron_t5"] = [ - "T5Model", - "T5EncoderModel", - "T5ForConditionalGeneration" - ] - - -if TYPE_CHECKING: - from .configuration_megatron_t5 import T5Config - from .tokenization_megatron_t5 import T5Tokenizer - - if is_torch_available(): - from .modeling_megatron_t5 import ( - T5Model, - T5EncoderModel, - T5ForConditionalGeneration - ) - -else: - import sys - - sys.modules[__name__] = _LazyModule( - __name__, globals()["__file__"], _import_structure) diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/models/encoders/__init__.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/models/encoders/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ferdmartin/GradApplicationDocsApp2/custom_models.py b/spaces/ferdmartin/GradApplicationDocsApp2/custom_models.py deleted file mode 100644 index c06e3b5979569ee1e192f1c6d400ef25d9b49b6a..0000000000000000000000000000000000000000 --- a/spaces/ferdmartin/GradApplicationDocsApp2/custom_models.py +++ /dev/null @@ -1,87 +0,0 @@ -from typing import Optional -from transformers import PreTrainedModel, PretrainedConfig, DistilBertModel, BertModel -import torch -from torch import nn - - -device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - -class TransformerBasedModelDistilBert(nn.Module): - def __init__(self): - super(TransformerBasedModelDistilBert, self).__init__() - self.bert = DistilBertModel.from_pretrained('distilbert-base-uncased') - self.dropout = nn.Dropout(0.55) - self.fc = nn.Linear(768, 2) - - def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): - input_shape = input_ids.size() - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) - - outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask) - pooled_output = outputs.last_hidden_state[:, 0, :] - pooled_output = self.dropout(pooled_output) - logits = self.fc(pooled_output) - return logits - -class TransformerBasedModelBert(nn.Module): - def __init__(self): - super(TransformerBasedModelBert, self).__init__() - self.bert = BertModel.from_pretrained('bert-base-uncased') - self.dropout = nn.Dropout(0.55) - self.fc = nn.Linear(768, 2) - - def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): - input_shape = input_ids.size() - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) - - outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask) - pooled_output = outputs[1] - pooled_output = self.dropout(pooled_output) - logits = self.fc(pooled_output) - return logits - -class MyConfigDistil(PretrainedConfig): - model_type = "distilbert" - def __init__(self, final_dropout=0.55, **kwargs): - super().__init__(**kwargs) - self.final_dropout = final_dropout - -class MyConfig(PretrainedConfig): - model_type = "bert" - def __init__(self, final_dropout=0.55, **kwargs): - super().__init__(**kwargs) - self.final_dropout = final_dropout - -class MyHFModel_DistilBertBased(PreTrainedModel): - config_class = MyConfigDistil - def __init__(self, config): - 
super().__init__(config) - self.config = config - self.model = TransformerBasedModelDistilBert() - def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): - input_shape = input_ids.size() - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) - - return self.model(input_ids=input_ids, attention_mask=attention_mask) - -class MyHFModel_BertBased(PreTrainedModel): - config_class = MyConfig - def __init__(self, config): - super().__init__(config) - self.config = config - self.model = TransformerBasedModelBert() - def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None): - input_shape = input_ids.size() - if attention_mask is None: - attention_mask = torch.ones(input_shape, device=device) - - return self.model(input_ids=input_ids, attention_mask=attention_mask) - -config = MyConfigDistil(0.55) -HF_DistilBertBasedModelAppDocs = MyHFModel_DistilBertBased(config) - -config_db = MyConfig(0.55) -HF_BertBasedModelAppDocs = MyHFModel_BertBased(config_db) \ No newline at end of file diff --git a/spaces/fffiloni/SplitTrack2MusicGen/tests/modules/test_seanet.py b/spaces/fffiloni/SplitTrack2MusicGen/tests/modules/test_seanet.py deleted file mode 100644 index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/SplitTrack2MusicGen/tests/modules/test_seanet.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product - -import pytest -import torch - -from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock -from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d - - -class TestSEANetModel: - - def test_base(self): - encoder = SEANetEncoder() - decoder = SEANetDecoder() - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_causal(self): - encoder = SEANetEncoder(causal=True) - decoder = SEANetDecoder(causal=True) - x = torch.randn(1, 1, 24000) - - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_conv_skip_connection(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False) - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def test_seanet_encoder_decoder_final_act(self): - encoder = SEANetEncoder(true_skip=False) - decoder = SEANetDecoder(true_skip=False, final_activation='Tanh') - - x = torch.randn(1, 1, 24000) - z = encoder(x) - assert list(z.shape) == [1, 128, 75], z.shape - y = decoder(z) - assert y.shape == x.shape, (x.shape, y.shape) - - def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in encoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if n_blocks <= n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - # here we add + 1 to n_blocks as we increment n_blocks just after the block - assert 
resnet_layer.conv.norm_type == 'none' if (n_blocks + 1) <= n_disable_blocks else norm - - def test_encoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_encoder_blocks_norm(encoder, disable_blocks, norm) - - def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str): - n_blocks = 0 - for layer in decoder.model: - if isinstance(layer, StreamableConv1d): - n_blocks += 1 - assert layer.conv.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, StreamableConvTranspose1d): - n_blocks += 1 - assert layer.convtr.norm_type == 'none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - elif isinstance(layer, SEANetResnetBlock): - for resnet_layer in layer.block: - if isinstance(resnet_layer, StreamableConv1d): - assert resnet_layer.conv.norm_type == 'none' \ - if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm - - def test_decoder_disable_norm(self): - n_residuals = [0, 1, 3] - disable_blocks = [0, 1, 2, 3, 4, 5, 6] - norms = ['weight_norm', 'none'] - for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms): - decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm, - disable_norm_outer_blocks=disable_blocks) - self._check_decoder_blocks_norm(decoder, disable_blocks, norm) - - def test_disable_norm_raises_exception(self): - # Invalid disable_norm_outer_blocks values raise exceptions - with pytest.raises(AssertionError): - SEANetEncoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) - - with pytest.raises(AssertionError): - SEANetDecoder(disable_norm_outer_blocks=-1) - - with pytest.raises(AssertionError): - SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7) diff --git a/spaces/flax-community/Multilingual-VQA/apps/model/flax_clip_vision_bert/__init__.py b/spaces/flax-community/Multilingual-VQA/apps/model/flax_clip_vision_bert/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/flax-community/roberta-hindi/About/results.md b/spaces/flax-community/roberta-hindi/About/results.md deleted file mode 100644 index 71505284131e2b9de62b2170f2d267fed109b337..0000000000000000000000000000000000000000 --- a/spaces/flax-community/roberta-hindi/About/results.md +++ /dev/null @@ -1,10 +0,0 @@ -## Evaluation Results - -RoBERTa Hindi is evaluated on downstream tasks. The results are summarized below. 
- -| Task | Task Type | IndicBERT | HindiBERTa | Indic Transformers Hindi BERT | RoBERTa Hindi Guj San | RoBERTa Hindi(ours) | -|-------------------------|----------------------|-----------|------------|-------------------------------|-----------------------|---------------| -| BBC News Classification | Genre Classification | **76.44** | 66.86 | **77.6** | 64.9 | 73.67 | -| WikiNER | Token Classification | - | 90.68 | **95.09** | 89.61 | **92.76** | -| IITP Product Reviews | Sentiment Analysis | **78.01** | 73.23 | **78.39** | 66.16 | 75.53 | -| IITP Movie Reviews | Sentiment Analysis | 60.97 | 52.26 | **70.65** | 49.35 | **61.29** | \ No newline at end of file diff --git a/spaces/floriankrempl/mtg_rules_bot/mtg/utils/__init__.py b/spaces/floriankrempl/mtg_rules_bot/mtg/utils/__init__.py deleted file mode 100644 index 1849b7d2f5782bdc0a90c9362b1e49a17cb0f48d..0000000000000000000000000000000000000000 --- a/spaces/floriankrempl/mtg_rules_bot/mtg/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .utils import get_openai_api_key diff --git a/spaces/florim/MedGPT/autogpt/permanent_memory/__init__.py b/spaces/florim/MedGPT/autogpt/permanent_memory/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/freddyaboulton/llama2-70b-discord-bot/README.md b/spaces/freddyaboulton/llama2-70b-discord-bot/README.md deleted file mode 100644 index d3d306b8cf29e5d8bbdadd4ebba09358ccc5f3dc..0000000000000000000000000000000000000000 --- a/spaces/freddyaboulton/llama2-70b-discord-bot/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Llama2 70b Discord Bot -emoji: 🏆 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.38.0 -app_file: app.py -pinned: false -tags: -- gradio-discord-bot ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/geraldvillaran/dolly-chat/app.py b/spaces/geraldvillaran/dolly-chat/app.py deleted file mode 100644 index 5edcd8bbdfd2095cba693dcb7a7ce19f1ebd94fd..0000000000000000000000000000000000000000 --- a/spaces/geraldvillaran/dolly-chat/app.py +++ /dev/null @@ -1,9 +0,0 @@ -import gradio as gr -import torch -from transformers import pipeline -ans = pipeline(model="databricks/dolly-v2-3b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto") -def answer(query): - out=ans(query) - return out -Demo = gr.Interface(fn=answer,inputs='text',outputs='text',examples=[['What is the capital of India ?']]) -Demo.launch() diff --git a/spaces/goliathaiconsulting/airbnb-search-engine/README.md b/spaces/goliathaiconsulting/airbnb-search-engine/README.md deleted file mode 100644 index 230f0fb6cae16004bb642eb08ce8f08020958aa7..0000000000000000000000000000000000000000 --- a/spaces/goliathaiconsulting/airbnb-search-engine/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Airbnb Search Engine -emoji: 🐢 -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/gordonchan/h2oo/gradio_utils/__init__.py b/spaces/gordonchan/h2oo/gradio_utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/gossminn/fillmorle-app/sftp/modules/span_typing/span_typing.py b/spaces/gossminn/fillmorle-app/sftp/modules/span_typing/span_typing.py deleted file mode 100644 index 
7a9848410d6e354c2d1598b7ee652b293b0a9314..0000000000000000000000000000000000000000 --- a/spaces/gossminn/fillmorle-app/sftp/modules/span_typing/span_typing.py +++ /dev/null @@ -1,64 +0,0 @@ -from abc import ABC -from typing import * - -import torch -from allennlp.common import Registrable -from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN, Vocabulary -from allennlp.training.metrics import CategoricalAccuracy - - -class SpanTyping(Registrable, torch.nn.Module, ABC): - """ - Models the probability p(child_label | child_span, parent_span, parent_label). - """ - def __init__( - self, - n_label: int, - label_to_ignore: Optional[List[int]] = None, - ): - """ - :param label_to_ignore: Label indexes in this list will be ignored. - Usually this should include NULL, PADDING and UNKNOWN. - """ - super().__init__() - self.label_to_ignore = label_to_ignore or list() - self.acc_metric = CategoricalAccuracy() - self.onto = torch.ones([n_label, n_label], dtype=torch.bool) - self.register_buffer('ontology', self.onto) - - def load_ontology(self, path: str, vocab: Vocabulary): - unk_id = vocab.get_token_index(DEFAULT_OOV_TOKEN, 'span_label') - for line in open(path).readlines(): - entities = [vocab.get_token_index(ent, 'span_label') for ent in line.replace('\n', '').split('\t')] - parent, children = entities[0], entities[1:] - if parent == unk_id: - continue - self.onto[parent, :] = False - children = list(filter(lambda x: x != unk_id, children)) - self.onto[parent, children] = True - self.register_buffer('ontology', self.onto) - - def forward( - self, - span_vec: torch.Tensor, - parent_at_span: torch.Tensor, - span_labels: Optional[torch.Tensor], - prediction_only: bool = False, - ) -> Dict[str, torch.Tensor]: - """ - Inputs: All features for typing a child span. - Output: The loss of typing and predictions. - :param span_vec: Shape [batch, span, token_dim] - :param parent_at_span: Shape [batch, span] - :param span_labels: Shape [batch, span] - :param prediction_only: If True, no loss returned & metric will not be updated - :return: - loss: Loss for label prediction. (absent of pred_only = True) - prediction: Predicted labels. - """ - raise NotImplementedError - - def get_metric(self, reset): - return{ - "typing_acc": self.acc_metric.get_metric(reset) * 100 - } diff --git a/spaces/gradio/HuBERT/examples/multilingual/data_scripts/utils/dedup.py b/spaces/gradio/HuBERT/examples/multilingual/data_scripts/utils/dedup.py deleted file mode 100644 index d6fed8c695cf218d3502d6ed8d23015520c0e179..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/multilingual/data_scripts/utils/dedup.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- - -import argparse - -def deup(src_file, tgt_file, src_file_out, tgt_file_out): - seen = set() - dup_count = 0 - with open(src_file, encoding='utf-8') as fsrc, \ - open(tgt_file, encoding='utf-8') as ftgt, \ - open(src_file_out, 'w', encoding='utf-8') as fsrc_out, \ - open(tgt_file_out, 'w', encoding='utf-8') as ftgt_out: - for s, t in zip(fsrc, ftgt): - if (s, t) not in seen: - fsrc_out.write(s) - ftgt_out.write(t) - seen.add((s, t)) - else: - dup_count += 1 - print(f'number of duplication: {dup_count}') - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--src-file", type=str, required=True, - help="src file") - parser.add_argument("--tgt-file", type=str, required=True, - help="tgt file") - parser.add_argument("--src-file-out", type=str, required=True, - help="src ouptut file") - parser.add_argument("--tgt-file-out", type=str, required=True, - help="tgt ouput file") - args = parser.parse_args() - deup(args.src_file, args.tgt_file, args.src_file_out, args.tgt_file_out) - - -if __name__ == "__main__": - main() diff --git a/spaces/gradio/HuBERT/fairseq/modules/sparse_multihead_attention.py b/spaces/gradio/HuBERT/fairseq/modules/sparse_multihead_attention.py deleted file mode 100644 index 3cbd9d6785886e319aab0601517e27df733b6f97..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/modules/sparse_multihead_attention.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math - -import torch - -from .multihead_attention import MultiheadAttention - - -class SparseMultiheadAttention(MultiheadAttention): - """Sparse Multi-Headed Attention. - - "Generating Long Sequences with Sparse Transformers". Implements - fixed factorized self attention, where l=stride and c=expressivity. - A(1) includes all words in the stride window and A(2) takes a summary of c - words from the end of each stride window. - If is_bidirectional=False, we do not include any words past the current word, - as in the paper. 
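As a rough usage sketch (not part of the original module, and assuming a fairseq checkout that still ships this class), the helper methods defined below can be queried directly to see which source positions a given target position attends to under this fixed pattern:

```python
# Illustrative only: inspect the fixed factorized pattern for a tiny causal setup.
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention

attn = SparseMultiheadAttention(
    embed_dim=16, num_heads=2, self_attention=True,
    stride=4, expressivity=1, is_bidirectional=False,
)
# With stride=4 and expressivity=1, position 9 keeps its local stride window
# {8, 9} plus the checkpoint summaries {3, 4} and {7, 8}, i.e. {3, 4, 7, 8, 9}.
print(sorted(attn.compute_fixed_attention_subset(word_index=9, tgt_len=12)))
```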
- """ - - def __init__( - self, - embed_dim, - num_heads, - kdim=None, - vdim=None, - dropout=0.0, - bias=True, - add_bias_kv=False, - add_zero_attn=False, - self_attention=False, - encoder_decoder_attention=False, - stride=32, - expressivity=8, - is_bidirectional=True, - ): - - super().__init__( - embed_dim, - num_heads, - kdim, - vdim, - dropout, - bias, - add_bias_kv, - add_zero_attn, - self_attention, - encoder_decoder_attention, - ) - - self.is_bidirectional = is_bidirectional - self.stride = stride - self.expressivity = expressivity - assert self.stride > 0 and self.stride >= self.expressivity - - # Used for Ai(2) calculations - beginning of [l-c, l] range - def compute_checkpoint(self, word_index): - if word_index % self.stride == 0 and word_index != 0: - checkpoint_index = word_index - self.expressivity - else: - checkpoint_index = ( - math.floor(word_index / self.stride) * self.stride - + self.stride - - self.expressivity - ) - return checkpoint_index - - # Computes Ai(2) - def compute_subset_summaries(self, absolute_max): - checkpoint_index = self.compute_checkpoint(0) - subset_two = set() - while checkpoint_index <= absolute_max - 1: - summary = set( - range( - checkpoint_index, - min(checkpoint_index + self.expressivity + 1, absolute_max), - ) - ) - subset_two = subset_two.union(summary) - checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride) - return subset_two - - # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf - def compute_fixed_attention_subset(self, word_index, tgt_len): - # +1s account for range function; [min, max) -> [min, max] - if not self.is_bidirectional: - absolute_max = word_index + 1 - else: - absolute_max = tgt_len - - # Subset 1 - whole window - rounded_index = ( - math.floor((word_index + self.stride) / self.stride) * self.stride - ) - if word_index % self.stride == 0 and word_index != 0: - subset_one = set( - range(word_index - self.stride, min(absolute_max, word_index + 1)) - ) - else: - subset_one = set( - range( - max(0, rounded_index - self.stride), - min(absolute_max, rounded_index + 1), - ) - ) - - # Subset 2 - summary per window - # If bidirectional, subset 2 is the same for every index - subset_two = set() - if not self.is_bidirectional: - subset_two = self.compute_subset_summaries(absolute_max) - - return subset_one.union(subset_two) - - # Compute sparse mask - if bidirectional, can pre-compute and store - def buffered_sparse_mask(self, tensor, tgt_len, src_len): - assert tgt_len > self.stride - sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf")) - - # If bidirectional, subset 2 is the same for every index - subset_summaries = set() - if self.is_bidirectional: - subset_summaries = self.compute_subset_summaries(tgt_len) - - for i in range(tgt_len): - fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len) - fixed_attention_subset = fixed_attention_subset.union(subset_summaries) - included_word_indices = torch.LongTensor(list(fixed_attention_subset)) - sparse_mask[i].index_fill_(0, included_word_indices, 0) - return sparse_mask.type_as(tensor) - - def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): - sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len) - sparse_mask = sparse_mask.unsqueeze(0).expand( - bsz * self.num_heads, tgt_len, src_len - ) - attn_weights += sparse_mask diff --git a/spaces/gradio/chatbot_simple/run.py b/spaces/gradio/chatbot_simple/run.py deleted file mode 100644 index 
9344665c7332e12e585d51e77758f0ed41b40797..0000000000000000000000000000000000000000 --- a/spaces/gradio/chatbot_simple/run.py +++ /dev/null @@ -1,19 +0,0 @@ -import gradio as gr -import random -import time - -with gr.Blocks() as demo: - chatbot = gr.Chatbot() - msg = gr.Textbox() - clear = gr.ClearButton([msg, chatbot]) - - def respond(message, chat_history): - bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"]) - chat_history.append((message, bot_message)) - time.sleep(2) - return "", chat_history - - msg.submit(respond, [msg, chatbot], [msg, chatbot]) - -if __name__ == "__main__": - demo.launch() diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/models/base_model.py b/spaces/gwang-kim/DATID-3D/pose_estimation/models/base_model.py deleted file mode 100644 index 478d6d0d1795c180153ffd4ff7dcfbbaa328b4be..0000000000000000000000000000000000000000 --- a/spaces/gwang-kim/DATID-3D/pose_estimation/models/base_model.py +++ /dev/null @@ -1,317 +0,0 @@ -"""This script defines the base network model for Deep3DFaceRecon_pytorch -""" - -import os -import torch -from collections import OrderedDict -from abc import ABC, abstractmethod -from . import networks - - -class BaseModel(ABC): - """This class is an abstract base class (ABC) for models. - To create a subclass, you need to implement the following five functions: - -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). - -- : unpack data from dataset and apply preprocessing. - -- : produce intermediate results. - -- : calculate losses, gradients, and update network weights. - -- : (optionally) add model-specific options and set default options. - """ - - def __init__(self, opt): - """Initialize the BaseModel class. - - Parameters: - opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions - - When creating your custom class, you need to implement your own initialization. - In this fucntion, you should first call - Then, you need to define four lists: - -- self.loss_names (str list): specify the training losses that you want to plot and save. - -- self.model_names (str list): specify the images that you want to display and save. - -- self.visual_names (str list): define networks used in our training. - -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. - """ - self.opt = opt - self.isTrain = opt.isTrain - self.device = torch.device('cpu') - self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir - self.loss_names = [] - self.model_names = [] - self.visual_names = [] - self.parallel_names = [] - self.optimizers = [] - self.image_paths = [] - self.metric = 0 # used for learning rate policy 'plateau' - - @staticmethod - def dict_grad_hook_factory(add_func=lambda x: x): - saved_dict = dict() - - def hook_gen(name): - def grad_hook(grad): - saved_vals = add_func(grad) - saved_dict[name] = saved_vals - return grad_hook - return hook_gen, saved_dict - - @staticmethod - def modify_commandline_options(parser, is_train): - """Add new model-specific options, and rewrite default values for existing options. - - Parameters: - parser -- original option parser - is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. - - Returns: - the modified parser. 
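To make the contract described in the class docstring concrete, a minimal hypothetical subclass could look like the sketch below. It is not part of Deep3DFaceRecon_pytorch; it assumes this module is importable as `models.base_model` and that `opt` carries the usual option fields (`isTrain`, `checkpoints_dir`, `name`, plus an `lr` used here):

```python
# Hypothetical minimal subclass, shown only to illustrate the BaseModel contract.
import torch
from models.base_model import BaseModel


class ToyModel(BaseModel):
    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['l2']        # exposed as self.loss_l2 via get_current_losses()
        self.model_names = ['net']      # handled by save_networks() / load_networks()
        self.visual_names = ['output']  # returned by get_current_visuals()
        self.parallel_names = ['net']
        self.net = torch.nn.Linear(4, 4)
        if self.isTrain:
            self.optimizer = torch.optim.Adam(self.net.parameters(), lr=opt.lr)
            self.optimizers = [self.optimizer]

    def set_input(self, input):
        self.data = input['data'].to(self.device)
        self.target = input['target'].to(self.device)

    def forward(self):
        self.output = self.net(self.data)

    def optimize_parameters(self):
        self.forward()
        self.loss_l2 = torch.nn.functional.mse_loss(self.output, self.target)
        self.optimizer.zero_grad()
        self.loss_l2.backward()
        self.optimizer.step()
```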
- """ - return parser - - @abstractmethod - def set_input(self, input): - """Unpack input data from the dataloader and perform necessary pre-processing steps. - - Parameters: - input (dict): includes the data itself and its metadata information. - """ - pass - - @abstractmethod - def forward(self): - """Run forward pass; called by both functions and .""" - pass - - @abstractmethod - def optimize_parameters(self): - """Calculate losses, gradients, and update network weights; called in every training iteration""" - pass - - def setup(self, opt): - """Load and print networks; create schedulers - - Parameters: - opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions - """ - if self.isTrain: - self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] - - if not self.isTrain or opt.continue_train: - load_suffix = opt.epoch - self.load_networks(load_suffix) - - - # self.print_networks(opt.verbose) - - def parallelize(self, convert_sync_batchnorm=True): - if not self.opt.use_ddp: - for name in self.parallel_names: - if 'renderer' in name: - continue - if isinstance(name, str): - module = getattr(self, name) - setattr(self, name, module.to(self.device)) - else: - for name in self.model_names: - if isinstance(name, str): - module = getattr(self, name) - if convert_sync_batchnorm: - module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module) - setattr(self, name, torch.nn.parallel.DistributedDataParallel(module.to(self.device), - device_ids=[self.device.index], - find_unused_parameters=True, broadcast_buffers=True)) - - # DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient. - for name in self.parallel_names: - if isinstance(name, str) and name not in self.model_names: - module = getattr(self, name) - setattr(self, name, module.to(self.device)) - - # put state_dict of optimizer to gpu device - if self.opt.phase != 'test': - if self.opt.continue_train: - for optim in self.optimizers: - for state in optim.state.values(): - for k, v in state.items(): - if isinstance(v, torch.Tensor): - state[k] = v.to(self.device) - - def data_dependent_initialize(self, data): - pass - - def train(self): - """Make models train mode""" - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - net.train() - - def eval(self): - """Make models eval mode""" - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - net.eval() - - def test(self): - """Forward function used in test time. - - This function wraps function in no_grad() so we don't save intermediate steps for backprop - It also calls to produce additional visualization results - """ - with torch.no_grad(): - self.forward() - # self.compute_visuals() - - def compute_visuals(self): - """Calculate additional output images for visdom and HTML visualization""" - pass - - def get_image_paths(self, name='A'): - """ Return image paths that are used to load current data""" - return self.image_paths if name =='A' else self.image_paths_B - - def update_learning_rate(self): - """Update learning rates for all the networks; called at the end of every epoch""" - for scheduler in self.schedulers: - if self.opt.lr_policy == 'plateau': - scheduler.step(self.metric) - else: - scheduler.step() - - lr = self.optimizers[0].param_groups[0]['lr'] - print('learning rate = %.7f' % lr) - - def get_current_visuals(self): - """Return visualization images. 
train.py will display these images with visdom, and save the images to a HTML""" - visual_ret = OrderedDict() - for name in self.visual_names: - if isinstance(name, str): - visual_ret[name] = getattr(self, name)[:, :3, ...] - return visual_ret - - def get_current_losses(self): - """Return traning losses / errors. train.py will print out these errors on console, and save them to a file""" - errors_ret = OrderedDict() - for name in self.loss_names: - if isinstance(name, str): - errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number - return errors_ret - - def save_networks(self, epoch): - """Save all the networks to the disk. - - Parameters: - epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) - """ - if not os.path.isdir(self.save_dir): - os.makedirs(self.save_dir) - - save_filename = 'epoch_%s.pth' % (epoch) - save_path = os.path.join(self.save_dir, save_filename) - - save_dict = {} - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - if isinstance(net, torch.nn.DataParallel) or isinstance(net, - torch.nn.parallel.DistributedDataParallel): - net = net.module - save_dict[name] = net.state_dict() - - - for i, optim in enumerate(self.optimizers): - save_dict['opt_%02d'%i] = optim.state_dict() - - for i, sched in enumerate(self.schedulers): - save_dict['sched_%02d'%i] = sched.state_dict() - - torch.save(save_dict, save_path) - - def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): - """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" - key = keys[i] - if i + 1 == len(keys): # at the end, pointing to a parameter/buffer - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'running_mean' or key == 'running_var'): - if getattr(module, key) is None: - state_dict.pop('.'.join(keys)) - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'num_batches_tracked'): - state_dict.pop('.'.join(keys)) - else: - self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) - - def load_networks(self, epoch): - """Load all the networks from the disk. 
- - Parameters: - epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) - """ - if self.opt.isTrain and self.opt.pretrained_name is not None: - load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name) - else: - load_dir = self.save_dir - load_filename = 'epoch_%s.pth' % (epoch) - load_path = os.path.join(load_dir, load_filename) - state_dict = torch.load(load_path, map_location=self.device) - print('loading the model from %s' % load_path) - - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - if isinstance(net, torch.nn.DataParallel): - net = net.module - net.load_state_dict(state_dict[name]) - - if self.opt.phase != 'test': - if self.opt.continue_train: - print('loading the optim from %s' % load_path) - for i, optim in enumerate(self.optimizers): - optim.load_state_dict(state_dict['opt_%02d'%i]) - - try: - print('loading the sched from %s' % load_path) - for i, sched in enumerate(self.schedulers): - sched.load_state_dict(state_dict['sched_%02d'%i]) - except: - print('Failed to load schedulers, set schedulers according to epoch count manually') - for i, sched in enumerate(self.schedulers): - sched.last_epoch = self.opt.epoch_count - 1 - - - - - def print_networks(self, verbose): - """Print the total number of parameters in the network and (if verbose) network architecture - - Parameters: - verbose (bool) -- if verbose: print the network architecture - """ - print('---------- Networks initialized -------------') - for name in self.model_names: - if isinstance(name, str): - net = getattr(self, name) - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - if verbose: - print(net) - print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) - print('-----------------------------------------------') - - def set_requires_grad(self, nets, requires_grad=False): - """Set requies_grad=Fasle for all the networks to avoid unnecessary computations - Parameters: - nets (network list) -- a list of networks - requires_grad (bool) -- whether the networks require gradients or not - """ - if not isinstance(nets, list): - nets = [nets] - for net in nets: - if net is not None: - for param in net.parameters(): - param.requires_grad = requires_grad - - def generate_visuals_for_evaluation(self, data, mode): - return {} diff --git a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/dnnlib/tflib/optimizer.py b/spaces/gyugnsu/DragGan-Inversion/stylegan_human/dnnlib/tflib/optimizer.py deleted file mode 100644 index cd130a8b5ca8e1af555365620fd01104a3be13ce..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/dnnlib/tflib/optimizer.py +++ /dev/null @@ -1,389 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2019, NVIDIA Corporation. All rights reserved. -# -# This work is made available under the Nvidia Source Code License-NC. -# To view a copy of this license, visit -# https://nvlabs.github.io/stylegan2/license.html - -"""Helper wrapper for a Tensorflow optimizer.""" - -import numpy as np -import tensorflow as tf - -from collections import OrderedDict -from typing import List, Union - -from . import autosummary -from . import tfutil -from .. 
import util - -from .tfutil import TfExpression, TfExpressionEx - -try: - # TensorFlow 1.13 - from tensorflow.python.ops import nccl_ops -except: - # Older TensorFlow versions - import tensorflow.contrib.nccl as nccl_ops - - -class Optimizer: - """A Wrapper for tf.train.Optimizer. - - Automatically takes care of: - - Gradient averaging for multi-GPU training. - - Gradient accumulation for arbitrarily large minibatches. - - Dynamic loss scaling and typecasts for FP16 training. - - Ignoring corrupted gradients that contain NaNs/Infs. - - Reporting statistics. - - Well-chosen default settings. - """ - - def __init__(self, - # Name string that will appear in TensorFlow graph. - name: str = "Train", - # Underlying optimizer class. - tf_optimizer: str = "tf.train.AdamOptimizer", - # Learning rate. Can vary over time. - learning_rate: TfExpressionEx = 0.001, - # Treat N consecutive minibatches as one by accumulating gradients. - minibatch_multiplier: TfExpressionEx = None, - # Share internal state with a previously created optimizer? - share: "Optimizer" = None, - # Enable dynamic loss scaling for robust mixed-precision training? - use_loss_scaling: bool = False, - # Log2 of initial loss scaling factor. - loss_scaling_init: float = 64.0, - # Log2 of per-minibatch loss scaling increment when there is no overflow. - loss_scaling_inc: float = 0.0005, - # Log2 of per-minibatch loss scaling decrement when there is an overflow. - loss_scaling_dec: float = 1.0, - # Report fine-grained memory usage statistics in TensorBoard? - report_mem_usage: bool = False, - **kwargs): - - # Public fields. - self.name = name - self.learning_rate = learning_rate - self.minibatch_multiplier = minibatch_multiplier - self.id = self.name.replace("/", ".") - self.scope = tf.get_default_graph().unique_name(self.id) - self.optimizer_class = util.get_obj_by_name(tf_optimizer) - self.optimizer_kwargs = dict(kwargs) - self.use_loss_scaling = use_loss_scaling - self.loss_scaling_init = loss_scaling_init - self.loss_scaling_inc = loss_scaling_inc - self.loss_scaling_dec = loss_scaling_dec - - # Private fields. - self._updates_applied = False - self._devices = OrderedDict() # device_name => EasyDict() - self._shared_optimizers = OrderedDict() # device_name => optimizer_class - self._gradient_shapes = None # [shape, ...] - self._report_mem_usage = report_mem_usage - - # Validate arguments. - assert callable(self.optimizer_class) - - # Share internal state if requested. - if share is not None: - assert isinstance(share, Optimizer) - assert self.optimizer_class is share.optimizer_class - assert self.learning_rate is share.learning_rate - assert self.optimizer_kwargs == share.optimizer_kwargs - self._shared_optimizers = share._shared_optimizers # pylint: disable=protected-access - - def _get_device(self, device_name: str): - """Get internal state for the given TensorFlow device.""" - tfutil.assert_tf_initialized() - if device_name in self._devices: - return self._devices[device_name] - - # Initialize fields. - device = util.EasyDict() - device.name = device_name - device.optimizer = None # Underlying optimizer: optimizer_class - device.loss_scaling_var = None # Log2 of loss scaling: tf.Variable - # Raw gradients: var => [grad, ...] 
- device.grad_raw = OrderedDict() - device.grad_clean = OrderedDict() # Clean gradients: var => grad - # Accumulation sums: var => tf.Variable - device.grad_acc_vars = OrderedDict() - device.grad_acc_count = None # Accumulation counter: tf.Variable - device.grad_acc = OrderedDict() # Accumulated gradients: var => grad - - # Setup TensorFlow objects. - with tfutil.absolute_name_scope(self.scope + "/Devices"), tf.device(device_name), tf.control_dependencies(None): - if device_name not in self._shared_optimizers: - optimizer_name = self.scope.replace( - "/", "_") + "_opt%d" % len(self._shared_optimizers) - self._shared_optimizers[device_name] = self.optimizer_class( - name=optimizer_name, learning_rate=self.learning_rate, **self.optimizer_kwargs) - device.optimizer = self._shared_optimizers[device_name] - if self.use_loss_scaling: - device.loss_scaling_var = tf.Variable(np.float32( - self.loss_scaling_init), trainable=False, name="loss_scaling_var") - - # Register device. - self._devices[device_name] = device - return device - - def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None: - """Register the gradients of the given loss function with respect to the given variables. - Intended to be called once per GPU.""" - tfutil.assert_tf_initialized() - assert not self._updates_applied - device = self._get_device(loss.device) - - # Validate trainables. - if isinstance(trainable_vars, dict): - # allow passing in Network.trainables as vars - trainable_vars = list(trainable_vars.values()) - assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1 - assert all(tfutil.is_tf_expression(expr) - for expr in trainable_vars + [loss]) - assert all(var.device == device.name for var in trainable_vars) - - # Validate shapes. - if self._gradient_shapes is None: - self._gradient_shapes = [var.shape.as_list() - for var in trainable_vars] - assert len(trainable_vars) == len(self._gradient_shapes) - assert all(var.shape.as_list() == var_shape for var, - var_shape in zip(trainable_vars, self._gradient_shapes)) - - # Report memory usage if requested. - deps = [] - if self._report_mem_usage: - self._report_mem_usage = False - try: - with tf.name_scope(self.id + '_mem'), tf.device(device.name), tf.control_dependencies([loss]): - deps.append(autosummary.autosummary( - self.id + "/mem_usage_gb", tf.contrib.memory_stats.BytesInUse() / 2**30)) - except tf.errors.NotFoundError: - pass - - # Compute gradients. - with tf.name_scope(self.id + "_grad"), tf.device(device.name), tf.control_dependencies(deps): - loss = self.apply_loss_scaling(tf.cast(loss, tf.float32)) - gate = tf.train.Optimizer.GATE_NONE # disable gating to reduce memory usage - grad_list = device.optimizer.compute_gradients( - loss=loss, var_list=trainable_vars, gate_gradients=gate) - - # Register gradients. - for grad, var in grad_list: - if var not in device.grad_raw: - device.grad_raw[var] = [] - device.grad_raw[var].append(grad) - - def apply_updates(self, allow_no_op: bool = False) -> tf.Operation: - """Construct training op to update the registered variables based on their gradients.""" - tfutil.assert_tf_initialized() - assert not self._updates_applied - self._updates_applied = True - all_ops = [] - - # Check for no-op. - if allow_no_op and len(self._devices) == 0: - with tfutil.absolute_name_scope(self.scope): - return tf.no_op(name='TrainingOp') - - # Clean up gradients. 
- for device_idx, device in enumerate(self._devices.values()): - with tfutil.absolute_name_scope(self.scope + "/Clean%d" % device_idx), tf.device(device.name): - for var, grad in device.grad_raw.items(): - - # Filter out disconnected gradients and convert to float32. - grad = [g for g in grad if g is not None] - grad = [tf.cast(g, tf.float32) for g in grad] - - # Sum within the device. - if len(grad) == 0: - grad = tf.zeros(var.shape) # No gradients => zero. - elif len(grad) == 1: - # Single gradient => use as is. - grad = grad[0] - else: - # Multiple gradients => sum. - grad = tf.add_n(grad) - - # Scale as needed. - scale = 1.0 / \ - len(device.grad_raw[var]) / len(self._devices) - scale = tf.constant(scale, dtype=tf.float32, name="scale") - if self.minibatch_multiplier is not None: - scale /= tf.cast(self.minibatch_multiplier, tf.float32) - scale = self.undo_loss_scaling(scale) - device.grad_clean[var] = grad * scale - - # Sum gradients across devices. - if len(self._devices) > 1: - with tfutil.absolute_name_scope(self.scope + "/Broadcast"), tf.device(None): - for all_vars in zip(*[device.grad_clean.keys() for device in self._devices.values()]): - # NCCL does not support zero-sized tensors. - if len(all_vars) > 0 and all(dim > 0 for dim in all_vars[0].shape.as_list()): - all_grads = [device.grad_clean[var] for device, var in zip( - self._devices.values(), all_vars)] - all_grads = nccl_ops.all_sum(all_grads) - for device, var, grad in zip(self._devices.values(), all_vars, all_grads): - device.grad_clean[var] = grad - - # Apply updates separately on each device. - for device_idx, device in enumerate(self._devices.values()): - with tfutil.absolute_name_scope(self.scope + "/Apply%d" % device_idx), tf.device(device.name): - # pylint: disable=cell-var-from-loop - - # Accumulate gradients over time. - if self.minibatch_multiplier is None: - acc_ok = tf.constant(True, name='acc_ok') - device.grad_acc = OrderedDict(device.grad_clean) - else: - # Create variables. - with tf.control_dependencies(None): - for var in device.grad_clean.keys(): - device.grad_acc_vars[var] = tf.Variable( - tf.zeros(var.shape), trainable=False, name="grad_acc_var") - device.grad_acc_count = tf.Variable( - tf.zeros([]), trainable=False, name="grad_acc_count") - - # Track counter. - count_cur = device.grad_acc_count + 1.0 - def count_inc_op(): return tf.assign(device.grad_acc_count, count_cur) - def count_reset_op(): return tf.assign(device.grad_acc_count, tf.zeros([])) - acc_ok = (count_cur >= tf.cast( - self.minibatch_multiplier, tf.float32)) - all_ops.append( - tf.cond(acc_ok, count_reset_op, count_inc_op)) - - # Track gradients. - for var, grad in device.grad_clean.items(): - acc_var = device.grad_acc_vars[var] - acc_cur = acc_var + grad - device.grad_acc[var] = acc_cur - with tf.control_dependencies([acc_cur]): - def acc_inc_op(): return tf.assign(acc_var, acc_cur) - def acc_reset_op(): return tf.assign(acc_var, tf.zeros(var.shape)) - all_ops.append( - tf.cond(acc_ok, acc_reset_op, acc_inc_op)) - - # No overflow => apply gradients. - all_ok = tf.reduce_all(tf.stack( - [acc_ok] + [tf.reduce_all(tf.is_finite(g)) for g in device.grad_acc.values()])) - - def apply_op(): return device.optimizer.apply_gradients( - [(tf.cast(grad, var.dtype), var) for var, grad in device.grad_acc.items()]) - all_ops.append(tf.cond(all_ok, apply_op, tf.no_op)) - - # Adjust loss scaling. 
- if self.use_loss_scaling: - def ls_inc_op(): return tf.assign_add( - device.loss_scaling_var, self.loss_scaling_inc) - def ls_dec_op(): return tf.assign_sub( - device.loss_scaling_var, self.loss_scaling_dec) - - def ls_update_op(): return tf.group(tf.cond(all_ok, ls_inc_op, ls_dec_op)) - all_ops.append(tf.cond(acc_ok, ls_update_op, tf.no_op)) - - # Last device => report statistics. - if device_idx == len(self._devices) - 1: - all_ops.append(autosummary.autosummary( - self.id + "/learning_rate", self.learning_rate)) - all_ops.append(autosummary.autosummary( - self.id + "/overflow_frequency", tf.where(all_ok, 0, 1), condition=acc_ok)) - if self.use_loss_scaling: - all_ops.append(autosummary.autosummary( - self.id + "/loss_scaling_log2", device.loss_scaling_var)) - - # Initialize variables. - self.reset_optimizer_state() - if self.use_loss_scaling: - tfutil.init_uninitialized_vars( - [device.loss_scaling_var for device in self._devices.values()]) - if self.minibatch_multiplier is not None: - tfutil.run([var.initializer for device in self._devices.values() for var in list( - device.grad_acc_vars.values()) + [device.grad_acc_count]]) - - # Group everything into a single op. - with tfutil.absolute_name_scope(self.scope): - return tf.group(*all_ops, name="TrainingOp") - - def reset_optimizer_state(self) -> None: - """Reset internal state of the underlying optimizer.""" - tfutil.assert_tf_initialized() - tfutil.run([var.initializer for device in self._devices.values() - for var in device.optimizer.variables()]) - - def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]: - """Get or create variable representing log2 of the current dynamic loss scaling factor.""" - return self._get_device(device).loss_scaling_var - - def apply_loss_scaling(self, value: TfExpression) -> TfExpression: - """Apply dynamic loss scaling for the given expression.""" - assert tfutil.is_tf_expression(value) - if not self.use_loss_scaling: - return value - return value * tfutil.exp2(self.get_loss_scaling_var(value.device)) - - def undo_loss_scaling(self, value: TfExpression) -> TfExpression: - """Undo the effect of dynamic loss scaling for the given expression.""" - assert tfutil.is_tf_expression(value) - if not self.use_loss_scaling: - return value - return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type - - -class SimpleAdam: - """Simplified version of tf.train.AdamOptimizer that behaves identically when used with dnnlib.tflib.Optimizer.""" - - def __init__(self, name="Adam", learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): - self.name = name - self.learning_rate = learning_rate - self.beta1 = beta1 - self.beta2 = beta2 - self.epsilon = epsilon - self.all_state_vars = [] - - def variables(self): - return self.all_state_vars - - def compute_gradients(self, loss, var_list, gate_gradients=tf.train.Optimizer.GATE_NONE): - assert gate_gradients == tf.train.Optimizer.GATE_NONE - return list(zip(tf.gradients(loss, var_list), var_list)) - - def apply_gradients(self, grads_and_vars): - with tf.name_scope(self.name): - state_vars = [] - update_ops = [] - - # Adjust learning rate to deal with startup bias. 
- with tf.control_dependencies(None): - b1pow_var = tf.Variable( - dtype=tf.float32, initial_value=1, trainable=False) - b2pow_var = tf.Variable( - dtype=tf.float32, initial_value=1, trainable=False) - state_vars += [b1pow_var, b2pow_var] - b1pow_new = b1pow_var * self.beta1 - b2pow_new = b2pow_var * self.beta2 - update_ops += [tf.assign(b1pow_var, b1pow_new), - tf.assign(b2pow_var, b2pow_new)] - lr_new = self.learning_rate * \ - tf.sqrt(1 - b2pow_new) / (1 - b1pow_new) - - # Construct ops to update each variable. - for grad, var in grads_and_vars: - with tf.control_dependencies(None): - m_var = tf.Variable( - dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False) - v_var = tf.Variable( - dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False) - state_vars += [m_var, v_var] - m_new = self.beta1 * m_var + (1 - self.beta1) * grad - v_new = self.beta2 * v_var + (1 - self.beta2) * tf.square(grad) - var_delta = lr_new * m_new / (tf.sqrt(v_new) + self.epsilon) - update_ops += [tf.assign(m_var, m_new), tf.assign(v_var, - v_new), tf.assign_sub(var, var_delta)] - - # Group everything together. - self.all_state_vars += state_vars - return tf.group(*update_ops) diff --git a/spaces/gyugnsu/DragGan-Inversion/torch_utils/pti.py b/spaces/gyugnsu/DragGan-Inversion/torch_utils/pti.py deleted file mode 100644 index 23be1ee0a101a1c6ce14db7f66673946a0f7d612..0000000000000000000000000000000000000000 --- a/spaces/gyugnsu/DragGan-Inversion/torch_utils/pti.py +++ /dev/null @@ -1,89 +0,0 @@ -import pickle -from PTI.utils.ImagesDataset import ImagesDataset, Image2Dataset -import torch -from PTI.utils.models_utils import load_old_G -from PTI.utils.alignment import align_face - -from PTI.training.coaches.single_id_coach import SingleIDCoach -from PTI.configs import global_config, paths_config -import dlib - -import os -from torchvision.transforms import transforms -from torch.utils.data import DataLoader -from string import ascii_uppercase -import sys -from pathlib import Path - -sys.path.append(".") -# sys.path.append('PTI/') -# sys.path.append('PTI/training/') - - -def run_PTI(img, run_name): - # os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' - # os.environ['CUDA_VISIBLE_DEVICES'] = global_config.cuda_visible_devices - - global_config.run_name = run_name - - global_config.pivotal_training_steps = 1 - global_config.training_step = 1 - - embedding_dir_path = f"{paths_config.embedding_base_dir}/{paths_config.input_data_id}/{paths_config.pti_results_keyword}" - os.makedirs(embedding_dir_path, exist_ok=True) - - # dataset = ImagesDataset(paths_config.input_data_path, transforms.Compose([ - # transforms.ToTensor(), - # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])) - - G = load_old_G() - IMAGE_SIZE = 1024 - predictor = dlib.shape_predictor(paths_config.dlib) - aligned_image = align_face(img, predictor=predictor, output_size=IMAGE_SIZE) - img = aligned_image.resize([G.img_resolution, G.img_resolution]) - dataset = Image2Dataset(img) - - dataloader = DataLoader(dataset, batch_size=1, shuffle=False) - - coach = SingleIDCoach(dataloader, use_wandb=False) - - new_G, w_pivot = coach.train() - return new_G, w_pivot - - -def export_updated_pickle(new_G, out_path, run_name): - image_name = "customIMG" - - with open(paths_config.stylegan2_ada_ffhq, "rb") as f: - old_G = pickle.load(f)["G_ema"].cuda() - - embedding = Path(f"{paths_config.checkpoints_dir}/model_{run_name}_{image_name}.pt") - with open(embedding, "rb") as f_new: - new_G = torch.load(f_new).cuda() - - print("Exporting large 
updated pickle based off new generator and ffhq.pkl") - with open(paths_config.stylegan2_ada_ffhq, "rb") as f: - d = pickle.load(f) - old_G = d["G_ema"].cuda() # tensor - old_D = d["D"].eval().requires_grad_(False).cpu() - - tmp = {} - tmp["G"] = old_G.eval().requires_grad_(False).cpu() - tmp["G_ema"] = new_G.eval().requires_grad_(False).cpu() - tmp["D"] = old_D - tmp["training_set_kwargs"] = None - tmp["augment_pipe"] = None - - with open(out_path, "wb") as f: - pickle.dump(tmp, f) - # delete - - embedding.unlink() - - -# if __name__ == '__main__': -# from PIL import Image -# img = Image.open('PTI/test/test.jpg') -# new_G, w_pivot = run_PTI(img, use_wandb=False, use_multi_id_training=False) -# out_path = f'checkpoints/stylegan2_custom_512_pytorch.pkl' -# export_updated_pickle(new_G, out_path) diff --git a/spaces/h2oai/wave-tour/examples/db.py b/spaces/h2oai/wave-tour/examples/db.py deleted file mode 100644 index f952754bd02499b7b06144c85322f0d01f6bdc0f..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/db.py +++ /dev/null @@ -1,86 +0,0 @@ -# WaveDB -# How to use WaveDB, a simple sqlite3 database server that ships with Wave. -# --- - -# Before you run this example, start WaveDB (`wavedb`). -# By default, WaveDB listens on port 10100. -# -# To run this example, execute `python db.py` -# -# If your WaveDB instance is configured differently, you might want to set -# the following environment variables accordingly: -# H2O_WAVEDB_ADDRESS - the ip:port of the database server -# H2O_WAVEDB_ACCESS_KEY_ID - the API access key ID -# H2O_WAVEDB_ACCESS_KEY_SECRET - the API access key secret - -import asyncio -from h2o_wave import connect - - -async def main(): - # Create a database connection - connection = connect() - - # Access the 'employees' database. - # A new database is created automatically if it does not exist. - db = connection["employees"] - - # Execute some statements. - await db.exec("drop table if exists employee") - await db.exec("create table employee(empid integer, name text, title text)") - - # Execute a statement and handle errors. - results, err = await db.exec("insert into employee values(?, ?, ?)", 101, 'Jeffrey Lebowski', 'Slacker') - if err: - raise ValueError(err) - - # Execute many statements. - insert_employee = "insert into employee values(?, ?, ?)" - await db.exec_many( - (insert_employee, 102, 'Walter Sobchak', 'Veteran'), - (insert_employee, 103, 'Donny Kerabatsos', 'Sidekick'), - (insert_employee, 104, 'Jesus Quintana', 'Bowler'), - (insert_employee, 105, 'Uli Kunkel', 'Nihilist'), - ) - - # Execute many statements as a transaction. - await db.exec_atomic( - (insert_employee, 106, 'Brandt', 'Butler'), - (insert_employee, 107, 'Maude Lebowski', 'Artist'), - (insert_employee, 108, 'Franz', 'Nihilist'), - (insert_employee, 109, 'Kieffer', 'Nihilist'), - ) - - # Read records. - rows, err = await db.exec("select * from employee") - if err: - raise ValueError(err) - - print(rows) - - # Prints: - # [ - # [101, 'Jeffrey Lebowski', 'Slacker'], - # [102, 'Walter Sobchak', 'Veteran'], - # [103, 'Donny Kerabatsos', 'Sidekick'], - # [104, 'Jesus Quintana', 'Bowler'], - # [105, 'Uli Kunkel', 'Nihilist'], - # [106, 'Brandt', 'Butler'], - # [107, 'Maude Lebowski', 'Artist'], - # [108, 'Franz', 'Nihilist'], - # [109, 'Kieffer', 'Nihilist'] - # ] - - # Clean up. - await db.exec("drop table employee") - - # Drop the database entirely. Warning: A database is irrecoverable once dropped. - await db.drop() - - # Close connection. 
- await connection.close() - - -loop = asyncio.new_event_loop() -asyncio.set_event_loop(loop) -loop.run_until_complete(main()) diff --git a/spaces/h2oai/wave-tour/examples/ml_h2o.py b/spaces/h2oai/wave-tour/examples/ml_h2o.py deleted file mode 100644 index 0ff83209e104c91ff9d7a6126bc1a46071f0c706..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/ml_h2o.py +++ /dev/null @@ -1,61 +0,0 @@ -# WaveML / H2O-3 -# Build Wave Models for training and prediction of classification or regression using H2O-3 AutoML. -# --- -from h2o_wave import main, app, Q, ui -from h2o_wave_ml import build_model, ModelType - -from sklearn.datasets import load_wine -from sklearn.model_selection import train_test_split - - -@app('/demo') -async def serve(q: Q): - if q.args.train: - # train WaveML Model using H2O-3 AutoML - q.client.wave_model = build_model( - train_df=q.client.train_df, - target_column='target', - model_type=ModelType.H2O3, - _h2o3_max_runtime_secs=5, - _h2o3_nfolds=2 - ) - model_id = q.client.wave_model.model.model_id - accuracy = round(100 - q.client.wave_model.model.mean_per_class_error() * 100, 2) - - # show training details and prediction option - q.page['example'].predict.disabled = False - q.page['example'].message.type = 'success' - q.page['example'].message.text = 'Training successfully completed!' - q.page['example'].model_id.content = f'''**H2O AutoML model id:** {model_id}
      - **Accuracy:** {accuracy}%''' - q.page['example'].example_predictions.content = '' - elif q.args.predict: - # predict on test data - preds = q.client.wave_model.predict(test_df=q.client.test_df) - - # show predictions - q.page['example'].message.text = 'Prediction successfully completed!' - q.page['example'].example_predictions.content = f'''**Example predictions:**
      - {preds[0]}
      {preds[1]}
      {preds[2]}''' - else: - # prepare sample train and test dataframes - data = load_wine(as_frame=True)['frame'] - q.client.train_df, q.client.test_df = train_test_split(data, train_size=0.8) - - # display ui - q.page['example'] = ui.form_card( - box='1 1 -1 -1', - items=[ - ui.text(content='''The sample dataset used is the - wine dataset.'''), - ui.buttons(items=[ - ui.button(name='train', label='Train', primary=True), - ui.button(name='predict', label='Predict', primary=True, disabled=True), - ]), - ui.message_bar(name='message', type='warning', text='Training will take a few seconds'), - ui.text(name='model_id', content=''), - ui.text(name='example_predictions', content='') - ] - ) - - await q.page.save() diff --git a/spaces/h2oai/wave-tour/examples/plot_path_point.py b/spaces/h2oai/wave-tour/examples/plot_path_point.py deleted file mode 100644 index 4c388d752d088b1f13fbdd4b0689b6ce665b63ca..0000000000000000000000000000000000000000 --- a/spaces/h2oai/wave-tour/examples/plot_path_point.py +++ /dev/null @@ -1,29 +0,0 @@ -# Plot / Path / Point -# Make a path #plot with an additional layer of points. -# --- -from h2o_wave import site, data, ui - -page = site['/demo'] - -page.add('example', ui.plot_card( - box='1 1 4 5', - title='Path + Point', - data=data('price performance', 10, rows=[ - (0.1, 0.6), - (0.2, 0.5), - (0.3, 0.3), - (0.4, 0.2), - (0.4, 0.5), - (0.2, 0.2), - (0.8, 0.5), - (0.3, 0.3), - (0.2, 0.4), - (0.1, 0.0), - ]), - plot=ui.plot([ - ui.mark(type='path', x='=price', y='=performance'), - ui.mark(type='point', x='=price', y='=performance'), - ]) -)) - -page.save() diff --git a/spaces/haakohu/deep_privacy2_face/dp2/data/transforms/functional.py b/spaces/haakohu/deep_privacy2_face/dp2/data/transforms/functional.py deleted file mode 100644 index 1ee57f27ad07e597098ce1de967c3a50a1d06d0a..0000000000000000000000000000000000000000 --- a/spaces/haakohu/deep_privacy2_face/dp2/data/transforms/functional.py +++ /dev/null @@ -1,57 +0,0 @@ -import torchvision.transforms.functional as F -import torch -import pickle -from tops import download_file, assert_shape -from typing import Dict -from functools import lru_cache - -global symmetry_transform - - -@lru_cache(maxsize=1) -def get_symmetry_transform(symmetry_url): - file_name = download_file(symmetry_url) - with open(file_name, "rb") as fp: - symmetry = pickle.load(fp) - return torch.from_numpy(symmetry["vertex_transforms"]).long() - - -hflip_handled_cases = set([ - "keypoints", "img", "mask", "border", "semantic_mask", "vertices", "E_mask", "embed_map", "condition", - "embedding", "vertx2cat", "maskrcnn_mask", "__key__"]) - - -def hflip(container: Dict[str, torch.Tensor], flip_map=None) -> Dict[str, torch.Tensor]: - container["img"] = F.hflip(container["img"]) - if "condition" in container: - container["condition"] = F.hflip(container["condition"]) - if "embedding" in container: - container["embedding"] = F.hflip(container["embedding"]) - assert all([key in hflip_handled_cases for key in container]), container.keys() - if "keypoints" in container: - assert flip_map is not None - if container["keypoints"].ndim == 3: - keypoints = container["keypoints"][:, flip_map, :] - keypoints[:, :, 0] = 1 - keypoints[:, :, 0] - else: - assert_shape(container["keypoints"], (None, 3)) - keypoints = container["keypoints"][flip_map, :] - keypoints[:, 0] = 1 - keypoints[:, 0] - container["keypoints"] = keypoints - if "mask" in container: - container["mask"] = F.hflip(container["mask"]) - if "border" in container: - container["border"] = 
F.hflip(container["border"]) - if "semantic_mask" in container: - container["semantic_mask"] = F.hflip(container["semantic_mask"]) - if "vertices" in container: - symmetry_transform = get_symmetry_transform( - "https://dl.fbaipublicfiles.com/densepose/meshes/symmetry/symmetry_smpl_27554.pkl") - container["vertices"] = F.hflip(container["vertices"]) - symmetry_transform_ = symmetry_transform.to(container["vertices"].device) - container["vertices"] = symmetry_transform_[container["vertices"].long()] - if "E_mask" in container: - container["E_mask"] = F.hflip(container["E_mask"]) - if "maskrcnn_mask" in container: - container["maskrcnn_mask"] = F.hflip(container["maskrcnn_mask"]) - return container diff --git a/spaces/hands012/gpt-academic/docs/README_RS.md b/spaces/hands012/gpt-academic/docs/README_RS.md deleted file mode 100644 index 5ba5fcccc30db520d38e21950e2f7cfc03d324c5..0000000000000000000000000000000000000000 --- a/spaces/hands012/gpt-academic/docs/README_RS.md +++ /dev/null @@ -1,278 +0,0 @@ -> **Note** -> -> Этот файл самовыражения автоматически генерируется модулем перевода markdown в этом проекте и может быть не на 100% правильным. -> -# GPT Академическая оптимизация (GPT Academic) - -**Если вам нравится этот проект, пожалуйста, поставьте ему звезду. Если вы придумали более полезные языковые ярлыки или функциональные плагины, не стесняйтесь открывать issue или pull request. -Чтобы перевести этот проект на произвольный язык с помощью GPT, ознакомьтесь и запустите [`multi_language.py`](multi_language.py) (экспериментальный). - -> **Примечание** -> -> 1. Обратите внимание, что только функциональные плагины (кнопки), помеченные **красным цветом**, поддерживают чтение файлов, некоторые плагины находятся в **выпадающем меню** в области плагинов. Кроме того, мы с наивысшим приоритетом рады и обрабатываем pull requests для любых новых плагинов! -> -> 2. В каждом файле проекта функциональность описана в документе самоанализа [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). С каждой итерацией выполнения версии вы можете в любое время вызвать повторное создание отчета о самоанализе этого проекта, щелкнув соответствующий функциональный плагин и вызвав GPT. Вопросы сборки описаны в [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Метод установки](#installation). -> -> 3. Этот проект совместим и поощряет использование китайских языковых моделей chatglm и RWKV, пангу и т. Д. Поддержка нескольких api-key, которые могут существовать одновременно, может быть указан в файле конфигурации, например `API_KEY="openai-key1,openai-key2,api2d-key3"`. Если требуется временно изменить `API_KEY`, введите временный `API_KEY` в области ввода и нажмите клавишу Enter, чтобы он вступил в силу. - -> **Примечание** -> -> При установке зависимостей строго выбирайте версии, **указанные в файле requirements.txt**. -> -> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`## Задание - -Вы профессиональный переводчик научных статей. - -Переведите этот файл в формате Markdown на русский язык. Не изменяйте существующие команды Markdown, ответьте только переведенными результатами. 
- -## Результат - -Функция | Описание ---- | --- -Однокнопочный стиль | Поддержка однокнопочного стиля и поиска грамматических ошибок в научных статьях -Однокнопочный перевод на английский и китайский | Однокнопочный перевод на английский и китайский -Однокнопочное объяснение кода | Показ кода, объяснение его, генерация кода, комментирование кода -[Настройка быстрых клавиш](https://www.bilibili.com/video/BV14s4y1E7jN) | Поддержка настройки быстрых клавиш -Модульный дизайн | Поддержка пользовательских функциональных плагинов мощных [функциональных плагинов](https://github.com/binary-husky/chatgpt_academic/tree/master/crazy_functions), плагины поддерживают [горячую замену](https://github.com/binary-husky/chatgpt_academic/wiki/Function-Plug-in-Guide) -[Анализ своей программы](https://www.bilibili.com/video/BV1cj411A7VW) | [Функциональный плагин] [Однокнопочный просмотр](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academicProject-Self-analysis-Report) исходного кода этого проекта -[Анализ программы](https://www.bilibili.com/video/BV1cj411A7VW) | [Функциональный плагин] Однокнопочный анализ дерева других проектов Python/C/C++/Java/Lua/... -Чтение статей, [перевод](https://www.bilibili.com/video/BV1KT411x7Wn) статей | [Функциональный плагин] Однокнопочное чтение полного текста научных статей и генерация резюме -Полный перевод [LaTeX](https://www.bilibili.com/video/BV1nk4y1Y7Js/) и совершенствование | [Функциональный плагин] Однокнопочный перевод или совершенствование LaTeX статьи -Автоматическое комментирование | [Функциональный плагин] Однокнопочное автоматическое генерирование комментариев функций -[Перевод](https://www.bilibili.com/video/BV1yo4y157jV/) Markdown на английский и китайский | [Функциональный плагин] Вы видели обе версии файлов [README](https://github.com/binary-husky/chatgpt_academic/blob/master/docs/README_EN.md) для этих 5 языков? 
-Отчет о чат-анализе | [Функциональный плагин] После запуска будет автоматически сгенерировано сводное извещение -Функция перевода полного текста [PDF-статьи](https://www.bilibili.com/video/BV1KT411x7Wn) | [Функциональный плагин] Извлечение заголовка и резюме [PDF-статьи](https://www.bilibili.com/video/BV1KT411x7Wn) и перевод всего документа (многопоточность) -[Arxiv Helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Функциональный плагин] Введите URL статьи на arxiv и одним щелчком мыши переведите резюме и загрузите PDF -[Google Scholar Integration Helper](https://www.bilibili.com/video/BV19L411U7ia) | [Функциональный плагин] При заданном любом URL страницы поиска в Google Scholar позвольте gpt вам помочь [написать обзор](https://www.bilibili.com/video/BV1GP411U7Az/) -Сбор Интернет-информации + GPT | [Функциональный плагин] Однокнопочный [запрос информации из Интернета GPT](https://www.bilibili.com/video/BV1om4y127ck), затем ответьте на вопрос, чтобы информация не устарела никогда -Отображение формул / изображений / таблиц | Может одновременно отображать формулы в [формате Tex и рендеринге](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), поддерживает формулы, подсвечивает код -Поддержка функций с многопоточностью | Поддержка многопоточного вызова chatgpt, однокнопочная обработка [больших объемов текста](https://www.bilibili.com/video/BV1FT411H7c5/) или программ -Темная тема gradio для запуска приложений | Добавьте ```/?__theme=dark``` после URL в браузере, чтобы переключиться на темную тему -[Поддержка нескольких моделей LLM](https://www.bilibili.com/video/BV1wT411p7yf), [API2D](https://api2d.com/) | Они одновременно обслуживаются GPT3.5, GPT4, [Clear ChatGLM](https://github.com/THUDM/ChatGLM-6B), [Fudan MOSS](https://github.com/OpenLMLab/MOSS) -Подключение нескольких новых моделей LLM, поддержка деплоя[huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) | Подключение интерфейса Newbing (новый Bing), подключение поддержки [LLaMA](https://github.com/facebookresearch/llama), поддержка [RWKV](https://github.com/BlinkDL/ChatRWKV) и [Pangu α](https://openi.org.cn/pangu/) -Больше новых функций (генерация изображения и т. д.) | См. на конце этого файла…- All buttons are dynamically generated by reading functional.py, and custom functions can be freely added to liberate the clipboard -
      - -
      - -- Revision/Correction -
      - -
      - -- If the output contains formulas, they will be displayed in both tex and rendered form for easy copying and reading -
      - -
      - -- Don't feel like looking at project code? Show the entire project directly in chatgpt -
      - -
      - -- Mixing multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4) -
      - -
      - ---- -# Installation -## Installation-Method 1: Run directly (Windows, Linux or MacOS) - -1. Download the project -```sh -git clone https://github.com/binary-husky/chatgpt_academic.git -cd chatgpt_academic -``` - -2. Configure API_KEY - -In `config.py`, configure API KEY and other settings, [special network environment settings] (https://github.com/binary-husky/gpt_academic/issues/1). - -(P.S. When the program is running, it will first check whether there is a secret configuration file named `config_private.py` and use the configuration in it to replace the same name in` config.py`. Therefore, if you understand our configuration reading logic, we strongly recommend that you create a new configuration file named `config_private.py` next to `config.py`, and transfer (copy) the configuration in `config.py` to `config_private.py`. `config_private.py` is not controlled by git, which can make your privacy information more secure. P.S. The project also supports configuring most options through `environment variables`, and the writing format of environment variables refers to the `docker-compose` file. Priority of read: `environment variable`>`config_private.py`>`config.py`) - - -3. Install dependencies -```sh -# (Option I: If familiar with Python)(Python version 3.9 or above, the newer the better), note: use the official pip source or the aliyun pip source, temporary switching source method: python -m pip install -r requirements.txt - i https://mirrors.aliyun.com/pypi/simple/ -python -m pip install -r requirements.txt - -# (Option II: If unfamiliar with Python)Use Anaconda, the steps are also similar (https://www.bilibili.com/video/BV1rc411W7Dr): -conda create -n gptac_venv python=3.11 # create an Anaconda environment -conda activate gptac_venv # activate Anaconda environment -python -m pip install -r requirements.txt # This step is the same as the pip installation -``` - -
      If you need to support Tsinghua ChatGLM/Fudan MOSS as backend, click here to expand -

      - -[Optional step] If you need to support Tsinghua ChatGLM/Fudan MOSS as backend, you need to install more dependencies (prerequisites: familiar with Python + have used Pytorch + computer configuration is strong): -```sh -# [Optional step I] Support Tsinghua ChatGLM. Tsinghua ChatGLM note: If you encounter the "Call ChatGLM fail cannot load ChatGLM parameters normally" error, refer to the following: 1: The default installation above is torch+cpu version, and cuda is used Need to uninstall torch and reinstall torch+cuda; 2: If you cannot load the model due to insufficient local configuration, you can modify the model accuracy in request_llm/bridge_chatglm.py, AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) Modify to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True) -python -m pip install -r request_llm/requirements_chatglm.txt - -# [Optional step II] Support Fudan MOSS -python -m pip install -r request_llm/requirements_moss.txt -git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, you must be in the project root path - -# [Optional step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently, all supported models are as follows (the jittorllms series currently only supports the docker solution): -AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"] -``` - -

      -
      - - - -4. Run -```sh -python main.py -```5. Testing Function Plugin -``` -- Testing function plugin template function (requires GPT to answer what happened in history today), you can use this function as a template to implement more complex functions - Click "[Function plugin Template Demo] On this day in history" -``` - -## Installation - Method 2: Using Docker - -1. ChatGPT only (recommended for most people) - -``` sh -git clone https://github.com/binary-husky/chatgpt_academic.git # download the project -cd chatgpt_academic # enter the path -nano config.py # edit config.py with any text editor to configure "Proxy", "API_KEY", and "WEB_PORT" (eg 50923) -docker build -t gpt-academic . # install - -# (Last step-Option 1) In a Linux environment, using `--net=host` is more convenient and faster -docker run --rm -it --net=host gpt-academic -# (Last step-Option 2) In macOS/windows environment, only -p option can be used to expose the port on the container (eg 50923) to the port on the host -docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic -``` - -2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker) - -``` sh -# Edit docker-compose.yml, delete solutions 1 and 3, and keep solution 2. Modify the configuration of solution 2 in docker-compose.yml, refer to the comments in it -docker-compose up -``` - -3. ChatGPT + LLAMA + PanGu + RWKV (requires familiarity with Docker) -``` sh -# Edit docker-compose.yml, delete solutions 1 and 2, and keep solution 3. Modify the configuration of solution 3 in docker-compose.yml, refer to the comments in it -docker-compose up -``` - - -## Installation Method 3: Other Deployment Methods - -1. How to use reverse proxy URL/Microsoft Azure API -Configure API_URL_REDIRECT according to the instructions in `config.py`. - -2. Remote Cloud Server Deployment (Requires Knowledge and Experience of Cloud Servers) -Please visit [Deployment Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97) - -3. Using WSL2 (Windows Subsystem for Linux subsystem) -Please visit [Deployment Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2) - -4. How to run at the secondary URL (such as `http://localhost/subpath`) -Please visit [FastAPI Operation Instructions](docs/WithFastapi.md) - -5. Using docker-compose to run -Please read docker-compose.yml and follow the prompts to operate. - ---- -# Advanced Usage -## Customize new convenient buttons / custom function plugins - -1. Customize new convenient buttons (academic shortcuts) -Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has been added successfully and is visible, both prefixes and suffixes can be hot-modified without having to restart the program.) -For example: -``` -"Super English to Chinese": { - # Prefix, will be added before your input. For example, describe your requirements, such as translation, code interpretation, polishing, etc. - "Prefix": "Please translate the following content into Chinese, and then explain each proper noun that appears in the text with a markdown table:\n\n", - - # Suffix, will be added after your input. For example, with the prefix, you can enclose your input content in quotes. - "Suffix": "", -}, -``` -
      - -
      - -2. Custom function plugin - -Write powerful function plugins to perform any task you can and can't imagine. -The difficulty of debugging and writing plugins in this project is very low. As long as you have a certain knowledge of python, you can implement your own plugin function by imitating the template we provide. -Please refer to the [Function Plugin Guide](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) for details. - ---- -# Latest Update -## New feature dynamic - -1. Сохранение диалогов. Вызовите "Сохранить текущий диалог" в разделе функций-плагина, чтобы сохранить текущий диалог как файл HTML, который можно прочитать и восстановить. Кроме того, вызовите «Загрузить архив истории диалога» в меню функций-плагина, чтобы восстановить предыдущую сессию. Совет: если нажать кнопку "Загрузить исторический архив диалога" без указания файла, можно просмотреть кэш исторических файлов HTML. Щелкните "Удалить все локальные записи истории диалогов", чтобы удалить все файловые кэши HTML. - -2. Создание отчетов. Большинство плагинов создают рабочий отчет после завершения выполнения. -  -3. Модульный дизайн функций, простой интерфейс, но сильный функционал. - -4. Это проект с открытым исходным кодом, который может «сам переводить себя». - -5. Перевод других проектов с открытым исходным кодом - это не проблема. - -6. Мелкие функции декорирования [live2d](https://github.com/fghrsh/live2d_demo) (по умолчанию отключены, нужно изменить `config.py`). - -7. Поддержка большой языковой модели MOSS. - -8. Генерация изображений с помощью OpenAI. - -9. Анализ и подведение итогов аудиофайлов с помощью OpenAI. - -10. Полный цикл проверки правописания с использованием LaTeX. - -## Версии: -- Версия 3.5 (Todo): использование естественного языка для вызова функций-плагинов проекта (высокий приоритет) -- Версия 3.4 (Todo): улучшение многопоточной поддержки локальных больших моделей чата. -- Версия 3.3: добавлена функция объединения интернет-информации. -- Версия 3.2: функции-плагины поддерживают большое количество параметров (сохранение диалогов, анализирование любого языка программирования и одновременное запрос LLM-групп). -- Версия 3.1: поддержка одновременного запроса нескольких моделей GPT! Поддержка api2d, сбалансированное распределение нагрузки по нескольким ключам api. -- Версия 3.0: поддержка chatglm и других небольших LLM. -- Версия 2.6: перестройка структуры плагинов, улучшение интерактивности, добавлено больше плагинов. -- Версия 2.5: автоматическое обновление для решения проблемы длинного текста и переполнения токенов при обработке больших проектов. -- Версия 2.4: (1) добавлена функция полного перевода PDF; (2) добавлена функция переключения положения ввода; (3) добавлена опция вертикального макета; (4) оптимизация многопоточности плагинов. -- Версия 2.3: улучшение многопоточной интерактивности. -- Версия 2.2: функции-плагины поддерживают горячую перезагрузку. -- Версия 2.1: раскрывающийся макет. -- Версия 2.0: использование модульных функций-плагинов. -- Версия 1.0: базовые функции. 
- -gpt_academic Разработчик QQ-группы-2: 610599535 - -- Известные проблемы - - Некоторые плагины перевода в браузерах мешают работе фронтенда этого программного обеспечения - - Высокая или низкая версия gradio может вызвать множество исключений - -## Ссылки и учебные материалы - -``` -Мы использовали многие концепты кода из других отличных проектов, включая: - -# Проект 1: Qinghua ChatGLM-6B: -https://github.com/THUDM/ChatGLM-6B - -# Проект 2: Qinghua JittorLLMs: -https://github.com/Jittor/JittorLLMs - -# Проект 3: Edge-GPT: -https://github.com/acheong08/EdgeGPT - -# Проект 4: Chuanhu ChatGPT: -https://github.com/GaiZhenbiao/ChuanhuChatGPT - -# Проект 5: ChatPaper: -https://github.com/kaixindelele/ChatPaper - -# Больше: -https://github.com/gradio-app/gradio -https://github.com/fghrsh/live2d_demo -``` \ No newline at end of file diff --git a/spaces/haoqi7/research/widgets/sidebar.py b/spaces/haoqi7/research/widgets/sidebar.py deleted file mode 100644 index 75474bc6ae24c1ddee753c27f62ef0318c0f99cc..0000000000000000000000000000000000000000 --- a/spaces/haoqi7/research/widgets/sidebar.py +++ /dev/null @@ -1,96 +0,0 @@ -import streamlit as st -import datetime -# from .utils import PACKAGE_ROOT -from lrt.utils.functions import template - -APP_VERSION = 'v1.4.1' - -def render_sidebar(): - icons = f''' -
      - email -
      - ''' - - sidebar_markdown = f''' - -
      - TUM - -

      - Literature Research Tool -

      - - - - {APP_VERSION} - - - -
      - - - {icons} - - --- - - ## Choose the Paper Search Platforms''' - st.sidebar.markdown(sidebar_markdown,unsafe_allow_html=True) - # elvsier = st.sidebar.checkbox('Elvsier',value=True) - # IEEE = st.sidebar.checkbox('IEEE',value=False) - # google = st.sidebar.checkbox('Google Scholar') - platforms = st.sidebar.multiselect('Platforms',options= - [ - # 'Elvsier', - 'IEEE', - # 'Google Scholar', - 'Arxiv', - 'Paper with Code' - ], default=[ - # 'Elvsier', - 'IEEE', - # 'Google Scholar', - 'Arxiv', - 'Paper with Code' - ]) - - - - st.sidebar.markdown('## Choose the max number of papers to search') - number_papers=st.sidebar.slider('number', 10, 100, 20, 5) - - st.sidebar.markdown('## Choose the start year of publication') - this_year = datetime.date.today().year - start_year = st.sidebar.slider('year start:', 2000, this_year, 2010, 1) - - st.sidebar.markdown('## Choose the end year of publication') - end_year = st.sidebar.slider('year end:', 2000, this_year, this_year, 1) - - - with st.sidebar: - st.markdown('## Adjust hyperparameters') - with st.expander('Clustering Options'): - standardization = st.selectbox('1) Standardization before clustering', options=['no', 'yes'], index=0 ) - dr = st.selectbox('2) Dimension reduction', options=['none', 'pca'], index=0) - tmp = min(number_papers,15) - max_k = st.slider('3) Max number of clusters', 2,tmp , tmp//2) - cluster_model = st.selectbox('4) Clustering model', options=['Gaussian Mixture Model', 'K-means'], index=0) - - with st.expander('Keyphrases Generation Options'): - model_cpt = st.selectbox(label='Model checkpoint', options=template.keywords_extraction.keys(),index=0) - - - st.markdown('---') - st.markdown(icons,unsafe_allow_html=True) - st.markdown('''
      Copyright © 2022 by HAO Qi
      ''',unsafe_allow_html=True) - - # st.sidebar.markdown('## Choose the number of clusters') - # k = st.sidebar.slider('number',1,10,3) - - return platforms, number_papers, start_year, end_year, dict( - dimension_reduction= dr, - max_k = max_k, - model_cpt = model_cpt, - standardization = True if standardization == 'yes' else False, - cluster_model = 'gmm' if cluster_model == 'Gaussian Mixture Model' else 'kmeans-euclidean' - ) \ No newline at end of file diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docker/README.md b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docker/README.md deleted file mode 100644 index 760c4054d0e4fa56a67ab4b59c14979498e2f94a..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/docker/README.md +++ /dev/null @@ -1,36 +0,0 @@ - -## Use the container (with docker ≥ 19.03) - -``` -cd docker/ -# Build: -docker build --build-arg USER_ID=$UID -t detectron2:v0 . -# Run: -docker run --gpus all -it \ - --shm-size=8gb --env="DISPLAY" --volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \ - --name=detectron2 detectron2:v0 - -# Grant docker access to host X server to show images -xhost +local:`docker inspect --format='{{ .Config.Hostname }}' detectron2` -``` - -## Use the container (with docker < 19.03) - -Install docker-compose and nvidia-docker2, then run: -``` -cd docker && USER_ID=$UID docker-compose run detectron2 -``` - -#### Using a persistent cache directory - -You can prevent models from being re-downloaded on every run, -by storing them in a cache directory. - -To do this, add `--volume=$HOME/.torch/fvcore_cache:/tmp:rw` in the run command. - -## Install new dependencies -Add the following to `Dockerfile` to make persistent changes. -``` -RUN sudo apt-get update && sudo apt-get install -y vim -``` -Or run them in the container to make temporary changes. 
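Returning to the `widgets/sidebar.py` diff above: `render_sidebar()` returns four scalar settings plus a dict of clustering/keyphrase hyperparameters. A hedged sketch of how an app entry point might consume that tuple; the import path and the commented-out `run_search` call are assumptions for illustration, not code from the repository.

```python
import streamlit as st
from widgets.sidebar import render_sidebar  # assumed import path inside the Space

# Unpack the tuple returned at the end of render_sidebar()
platforms, max_papers, year_start, year_end, hyperparams = render_sidebar()

st.write(f"Searching {', '.join(platforms)} for up to {max_papers} papers ({year_start}-{year_end})")
st.json(hyperparams)  # dimension_reduction, max_k, model_cpt, standardization, cluster_model

# results = run_search(platforms, max_papers, year_start, year_end, **hyperparams)  # hypothetical
```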
diff --git a/spaces/hezhaoqia/vits-simple-api/bert_vits2/text/chinese_bert.py b/spaces/hezhaoqia/vits-simple-api/bert_vits2/text/chinese_bert.py deleted file mode 100644 index 3079c47480258b2d86579493b3b0220ed7d19c30..0000000000000000000000000000000000000000 --- a/spaces/hezhaoqia/vits-simple-api/bert_vits2/text/chinese_bert.py +++ /dev/null @@ -1,60 +0,0 @@ -import config -import torch -from transformers import AutoTokenizer, AutoModelForMaskedLM -from logger import logger - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -try: - logger.info("Loading chinese-roberta-wwm-ext-large...") - tokenizer = AutoTokenizer.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large") - model = AutoModelForMaskedLM.from_pretrained(config.ABS_PATH + "/bert_vits2/bert/chinese-roberta-wwm-ext-large").to( - device) - logger.info("Loading finished.") -except Exception as e: - logger.error(e) - logger.error(f"Please download model from hfl/chinese-roberta-wwm-ext-large.") - - -def get_bert_feature(text, word2ph): - with torch.no_grad(): - inputs = tokenizer(text, return_tensors='pt') - for i in inputs: - inputs[i] = inputs[i].to(device) - res = model(**inputs, output_hidden_states=True) - res = torch.cat(res['hidden_states'][-3:-2], -1)[0].cpu() - - assert len(word2ph) == len(text) + 2 - word2phone = word2ph - phone_level_feature = [] - for i in range(len(word2phone)): - repeat_feature = res[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - - return phone_level_feature.T - - -if __name__ == '__main__': - # feature = get_bert_feature('你好,我是说的道理。') - import torch - - word_level_feature = torch.rand(38, 1024) # 12个词,每个词1024维特征 - word2phone = [1, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 1, 2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 1, 2, - 2, 2, 2, 1] - - # 计算总帧数 - total_frames = sum(word2phone) - print(word_level_feature.shape) - print(word2phone) - phone_level_feature = [] - for i in range(len(word2phone)): - print(word_level_feature[i].shape) - - # 对每个词重复word2phone[i]次 - repeat_feature = word_level_feature[i].repeat(word2phone[i], 1) - phone_level_feature.append(repeat_feature) - - phone_level_feature = torch.cat(phone_level_feature, dim=0) - print(phone_level_feature.shape) # torch.Size([36, 1024]) diff --git a/spaces/hf-audio/whisper-large-v3/README.md b/spaces/hf-audio/whisper-large-v3/README.md deleted file mode 100644 index 72d3a93b394ba1f0089619434b813e7193611f4e..0000000000000000000000000000000000000000 --- a/spaces/hf-audio/whisper-large-v3/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Whisper Large V3 -emoji: 🤫 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.38.0 -app_file: app.py -pinned: false -tags: -- whisper-event ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/huggingchat/chat-ui/src/routes/search/[id]/+server.ts b/spaces/huggingchat/chat-ui/src/routes/search/[id]/+server.ts deleted file mode 100644 index 4bd856a873af95dd1def311b0049b7f772a2c238..0000000000000000000000000000000000000000 --- a/spaces/huggingchat/chat-ui/src/routes/search/[id]/+server.ts +++ /dev/null @@ -1,39 +0,0 @@ -import { collections } from "$lib/server/database"; -import { hashConv } from "$lib/utils/hashConv"; -import { error } from "@sveltejs/kit"; -import { ObjectId } from "mongodb"; - -export async function GET({ params, locals }) { - const searchId = new ObjectId(params.id); - - 
const search = await collections.webSearches.findOne({ - _id: searchId, - }); - - if (!search) { - throw error(404, "Search query not found"); - } - - const conv = await collections.conversations.findOne({ - _id: search.convId, - }); - - if (!conv) { - throw error(404, "Conversation not found"); - } - - // there's no better way to see if a conversation has been shared, so we hash the messages and see if there's a shared conversation with the same hash - const hash = await hashConv(conv); - const sharedConv = await collections.sharedConversations.findOne({ - hash: hash, - }); - - const userShouldSeeConv = - (conv.userId && locals.user?._id.toString() === conv.userId.toString()) || sharedConv !== null; - - if (!userShouldSeeConv) { - throw error(403, "You don't have access to the conversation here."); - } - - return new Response(JSON.stringify(search), { headers: { "Content-Type": "application/json" } }); -} diff --git a/spaces/huggingface/Model_Cards_Writing_Tool/markdownTagExtract.py b/spaces/huggingface/Model_Cards_Writing_Tool/markdownTagExtract.py deleted file mode 100644 index ebf7e44bf46e8de746ba8775130d57801e2d4608..0000000000000000000000000000000000000000 --- a/spaces/huggingface/Model_Cards_Writing_Tool/markdownTagExtract.py +++ /dev/null @@ -1,99 +0,0 @@ -#from lib import tag_checker -import glob -import fileinput -import os - -def tag_checker(file,start_header,end_header): - markdown_fp = open(file, "r") - - # Needed for later - idea_list = [] - idea_counter = 0 - - start_t = start_header - end_t = end_header - - inside_tag = False - for line in markdown_fp: - start_tag = start_t in line - end_tag = end_t in line - outside_tag = not inside_tag - - if start_tag and outside_tag: - # Start tag - tag_start_index = line.index(start_t) + len(end_t) - line = line[tag_start_index:] - - # This is where we'll store the idea - idea_list.append("") - - inside_tag = True - - if end_tag and inside_tag: - # End tag - end_tag_index = line.index(end_t) - - line = line[:end_tag_index] - - idea_list[idea_counter] += line - idea_counter += 1 - inside_tag = False - - if inside_tag: - # Extract - idea_list[idea_counter] += line - markdown_fp.close() - return idea_list - -def listToString(s): - - # initialize an empty string - str1 = "" - - # traverse in the string - for ele in s: - str1 += ele - - # return string - return str1 - - -def to_markdown(new_file, text_list): - new_file_name = open(new_file, "w") - - #new_file_name.write("# Collection of ideas\n") - - for i, idea in enumerate(text_list): - new_file_name.write(idea + "\n") - - new_file_name.close() - -def combine_markdowns(document1, original_document): - pat = document1 - with open(original_document, 'w') as fout: - for line in sorted(fileinput.input(glob.glob(pat))): - fout.write(line) - return original_document - -if __name__ == "__main__": - file = "template.md" - header_1_start = '' - header_1_end = '' - - header_2_start = '' - header_2_end = '' - - - how_to_start = (tag_checker(file,header_2_start,header_2_end)) - - intended_use_limits = (tag_checker(file,header_2_start,header_2_end)) - string_s = listToString(how_to_start) - print(string_s) - combine_markdowns = how_to_start + intended_use_limits - - - #to_markdown ('combined.md',combine_markdowns) - - - - \ No newline at end of file diff --git a/spaces/hysts-samples/save-user-preferences/style.css b/spaces/hysts-samples/save-user-preferences/style.css deleted file mode 100644 index 733eabca976ad10866b70059bf2f32372c8115dc..0000000000000000000000000000000000000000 --- 
a/spaces/hysts-samples/save-user-preferences/style.css +++ /dev/null @@ -1,9 +0,0 @@ -h1 { - text-align: center; -} - -#component-0 { - max-width: 800px; - margin: auto; - padding-top: 1.5rem; -} diff --git a/spaces/hysts/BLIP2-with-transformers/app.py b/spaces/hysts/BLIP2-with-transformers/app.py deleted file mode 100644 index 2bf9a0db996eac8da62fb7c445d7b4393cf051ea..0000000000000000000000000000000000000000 --- a/spaces/hysts/BLIP2-with-transformers/app.py +++ /dev/null @@ -1,327 +0,0 @@ -#!/usr/bin/env python - -from __future__ import annotations - -import os -import string - -import gradio as gr -import PIL.Image -import spaces -import torch -from transformers import AutoProcessor, Blip2ForConditionalGeneration - -DESCRIPTION = "# [BLIP-2](https://github.com/salesforce/LAVIS/tree/main/projects/blip2)" - -if not torch.cuda.is_available(): - DESCRIPTION += "\n

      Running on CPU 🥶 This demo does not work on CPU.

      " - -device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - -MODEL_ID_OPT_2_7B = "Salesforce/blip2-opt-2.7b" -MODEL_ID_OPT_6_7B = "Salesforce/blip2-opt-6.7b" -MODEL_ID_FLAN_T5_XL = "Salesforce/blip2-flan-t5-xl" -MODEL_ID_FLAN_T5_XXL = "Salesforce/blip2-flan-t5-xxl" -MODEL_ID = os.getenv("MODEL_ID", MODEL_ID_FLAN_T5_XXL) -assert MODEL_ID in [MODEL_ID_OPT_2_7B, MODEL_ID_OPT_6_7B, MODEL_ID_FLAN_T5_XL, MODEL_ID_FLAN_T5_XXL] - -if torch.cuda.is_available(): - processor = AutoProcessor.from_pretrained(MODEL_ID) - model = Blip2ForConditionalGeneration.from_pretrained(MODEL_ID, device_map="auto", load_in_8bit=True) - - -@spaces.GPU -def generate_caption( - image: PIL.Image.Image, - decoding_method: str = "Nucleus sampling", - temperature: float = 1.0, - length_penalty: float = 1.0, - repetition_penalty: float = 1.5, - max_length: int = 50, - min_length: int = 1, - num_beams: int = 5, - top_p: float = 0.9, -) -> str: - inputs = processor(images=image, return_tensors="pt").to(device, torch.float16) - generated_ids = model.generate( - pixel_values=inputs.pixel_values, - do_sample=decoding_method == "Nucleus sampling", - temperature=temperature, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - max_length=max_length, - min_length=min_length, - num_beams=num_beams, - top_p=top_p, - ) - result = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() - return result - - -@spaces.GPU -def answer_question( - image: PIL.Image.Image, - prompt: str, - decoding_method: str = "Nucleus sampling", - temperature: float = 1.0, - length_penalty: float = 1.0, - repetition_penalty: float = 1.5, - max_length: int = 50, - min_length: int = 1, - num_beams: int = 5, - top_p: float = 0.9, -) -> str: - inputs = processor(images=image, text=prompt, return_tensors="pt").to(device, torch.float16) - generated_ids = model.generate( - **inputs, - do_sample=decoding_method == "Nucleus sampling", - temperature=temperature, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - max_length=max_length, - min_length=min_length, - num_beams=num_beams, - top_p=top_p, - ) - result = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() - return result - - -def postprocess_output(output: str) -> str: - if output and output[-1] not in string.punctuation: - output += "." 
- return output - - -def chat( - image: PIL.Image.Image, - text: str, - decoding_method: str = "Nucleus sampling", - temperature: float = 1.0, - length_penalty: float = 1.0, - repetition_penalty: float = 1.5, - max_length: int = 50, - min_length: int = 1, - num_beams: int = 5, - top_p: float = 0.9, - history_orig: list[str] = [], - history_qa: list[str] = [], -) -> tuple[list[tuple[str, str]], list[str], list[str]]: - history_orig.append(text) - text_qa = f"Question: {text} Answer:" - history_qa.append(text_qa) - prompt = " ".join(history_qa) - - output = answer_question( - image=image, - prompt=prompt, - decoding_method=decoding_method, - temperature=temperature, - length_penalty=length_penalty, - repetition_penalty=repetition_penalty, - max_length=max_length, - min_length=min_length, - num_beams=num_beams, - top_p=top_p, - ) - output = postprocess_output(output) - history_orig.append(output) - history_qa.append(output) - - chat_val = list(zip(history_orig[0::2], history_orig[1::2])) - return chat_val, history_orig, history_qa - - -examples = [ - [ - "images/house.png", - "How could someone get out of the house?", - ], - [ - "images/flower.jpg", - "What is this flower and where is it's origin?", - ], - [ - "images/pizza.jpg", - "What are steps to cook it?", - ], - [ - "images/sunset.jpg", - "Here is a romantic message going along the photo:", - ], - [ - "images/forbidden_city.webp", - "In what dynasties was this place built?", - ], -] - -with gr.Blocks(css="style.css") as demo: - gr.Markdown(DESCRIPTION) - gr.DuplicateButton( - value="Duplicate Space for private use", - elem_id="duplicate-button", - visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1", - ) - - with gr.Box(): - image = gr.Image(type="pil") - with gr.Tabs(): - with gr.Tab(label="Image Captioning"): - caption_button = gr.Button("Caption it!") - caption_output = gr.Textbox(label="Caption Output", show_label=False, container=False) - with gr.Tab(label="Visual Question Answering"): - chatbot = gr.Chatbot(label="VQA Chat", show_label=False) - history_orig = gr.State(value=[]) - history_qa = gr.State(value=[]) - vqa_input = gr.Text(label="Chat Input", show_label=False, max_lines=1, container=False) - with gr.Row(): - clear_chat_button = gr.Button("Clear") - chat_button = gr.Button("Submit", variant="primary") - with gr.Accordion(label="Advanced settings", open=False): - text_decoding_method = gr.Radio( - label="Text Decoding Method", - choices=["Beam search", "Nucleus sampling"], - value="Nucleus sampling", - ) - temperature = gr.Slider( - label="Temperature", - info="Used with nucleus sampling.", - minimum=0.5, - maximum=1.0, - step=0.1, - value=1.0, - ) - length_penalty = gr.Slider( - label="Length Penalty", - info="Set to larger for longer sequence, used with beam search.", - minimum=-1.0, - maximum=2.0, - step=0.2, - value=1.0, - ) - repetition_penalty = gr.Slider( - label="Repetition Penalty", - info="Larger value prevents repetition.", - minimum=1.0, - maximum=5.0, - step=0.5, - value=1.5, - ) - max_length = gr.Slider( - label="Max Length", - minimum=1, - maximum=512, - step=1, - value=50, - ) - min_length = gr.Slider( - label="Minimum Length", - minimum=1, - maximum=100, - step=1, - value=1, - ) - num_beams = gr.Slider( - label="Number of Beams", - minimum=1, - maximum=10, - step=1, - value=5, - ) - top_p = gr.Slider( - label="Top P", - info="Used with nucleus sampling.", - minimum=0.5, - maximum=1.0, - step=0.1, - value=0.9, - ) - - gr.Examples( - examples=examples, - inputs=[image, vqa_input], - ) - - caption_button.click( 
- fn=generate_caption, - inputs=[ - image, - text_decoding_method, - temperature, - length_penalty, - repetition_penalty, - max_length, - min_length, - num_beams, - top_p, - ], - outputs=caption_output, - api_name="caption", - ) - - chat_inputs = [ - image, - vqa_input, - text_decoding_method, - temperature, - length_penalty, - repetition_penalty, - max_length, - min_length, - num_beams, - top_p, - history_orig, - history_qa, - ] - chat_outputs = [ - chatbot, - history_orig, - history_qa, - ] - vqa_input.submit( - fn=chat, - inputs=chat_inputs, - outputs=chat_outputs, - ).success( - fn=lambda: "", - outputs=vqa_input, - queue=False, - api_name=False, - ) - chat_button.click( - fn=chat, - inputs=chat_inputs, - outputs=chat_outputs, - api_name="chat", - ).success( - fn=lambda: "", - outputs=vqa_input, - queue=False, - api_name=False, - ) - clear_chat_button.click( - fn=lambda: ("", [], [], []), - inputs=None, - outputs=[ - vqa_input, - chatbot, - history_orig, - history_qa, - ], - queue=False, - api_name="clear", - ) - image.change( - fn=lambda: ("", [], [], []), - inputs=None, - outputs=[ - caption_output, - chatbot, - history_orig, - history_qa, - ], - queue=False, - ) - -if __name__ == "__main__": - demo.queue(max_size=10).launch() diff --git a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/backbones/__init__.py b/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/backbones/__init__.py deleted file mode 100644 index 94288c3af835e3513ddc70eb4cfb7f7e86852e3f..0000000000000000000000000000000000000000 --- a/spaces/hyxue/HiFiFace-inference-demo/Deep3DFaceRecon_pytorch/models/arcface_torch/backbones/__init__.py +++ /dev/null @@ -1,157 +0,0 @@ -from .iresnet import iresnet100 -from .iresnet import iresnet18 -from .iresnet import iresnet200 -from .iresnet import iresnet34 -from .iresnet import iresnet50 -from .mobilefacenet import get_mbf - - -def get_model(name, **kwargs): - # resnet - if name == "r18": - return iresnet18(False, **kwargs) - elif name == "r34": - return iresnet34(False, **kwargs) - elif name == "r50": - return iresnet50(False, **kwargs) - elif name == "r100": - return iresnet100(False, **kwargs) - elif name == "r200": - return iresnet200(False, **kwargs) - elif name == "r2060": - from .iresnet2060 import iresnet2060 - - return iresnet2060(False, **kwargs) - - elif name == "mbf": - fp16 = kwargs.get("fp16", False) - num_features = kwargs.get("num_features", 512) - return get_mbf(fp16=fp16, num_features=num_features) - - elif name == "mbf_large": - from .mobilefacenet import get_mbf_large - - fp16 = kwargs.get("fp16", False) - num_features = kwargs.get("num_features", 512) - return get_mbf_large(fp16=fp16, num_features=num_features) - - elif name == "vit_t": - num_features = kwargs.get("num_features", 512) - from .vit import VisionTransformer - - return VisionTransformer( - img_size=112, - patch_size=9, - num_classes=num_features, - embed_dim=256, - depth=12, - num_heads=8, - drop_path_rate=0.1, - norm_layer="ln", - mask_ratio=0.1, - ) - - elif name == "vit_t_dp005_mask0": # For WebFace42M - num_features = kwargs.get("num_features", 512) - from .vit import VisionTransformer - - return VisionTransformer( - img_size=112, - patch_size=9, - num_classes=num_features, - embed_dim=256, - depth=12, - num_heads=8, - drop_path_rate=0.05, - norm_layer="ln", - mask_ratio=0.0, - ) - - elif name == "vit_s": - num_features = kwargs.get("num_features", 512) - from .vit import VisionTransformer - - return VisionTransformer( - 
img_size=112, - patch_size=9, - num_classes=num_features, - embed_dim=512, - depth=12, - num_heads=8, - drop_path_rate=0.1, - norm_layer="ln", - mask_ratio=0.1, - ) - - elif name == "vit_s_dp005_mask_0": # For WebFace42M - num_features = kwargs.get("num_features", 512) - from .vit import VisionTransformer - - return VisionTransformer( - img_size=112, - patch_size=9, - num_classes=num_features, - embed_dim=512, - depth=12, - num_heads=8, - drop_path_rate=0.05, - norm_layer="ln", - mask_ratio=0.0, - ) - - elif name == "vit_b": - # this is a feature - num_features = kwargs.get("num_features", 512) - from .vit import VisionTransformer - - return VisionTransformer( - img_size=112, - patch_size=9, - num_classes=num_features, - embed_dim=512, - depth=24, - num_heads=8, - drop_path_rate=0.1, - norm_layer="ln", - mask_ratio=0.1, - using_checkpoint=True, - ) - - elif name == "vit_b_dp005_mask_005": # For WebFace42M - # this is a feature - num_features = kwargs.get("num_features", 512) - from .vit import VisionTransformer - - return VisionTransformer( - img_size=112, - patch_size=9, - num_classes=num_features, - embed_dim=512, - depth=24, - num_heads=8, - drop_path_rate=0.05, - norm_layer="ln", - mask_ratio=0.05, - using_checkpoint=True, - ) - - elif name == "vit_l_dp005_mask_005": # For WebFace42M - # this is a feature - num_features = kwargs.get("num_features", 512) - from .vit import VisionTransformer - - return VisionTransformer( - img_size=112, - patch_size=9, - num_classes=num_features, - embed_dim=768, - depth=24, - num_heads=8, - drop_path_rate=0.05, - norm_layer="ln", - mask_ratio=0.05, - using_checkpoint=True, - ) - - else: - raise ValueError() diff --git a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r2060.py b/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r2060.py deleted file mode 100644 index 23ad81e082c4b6390b67b164d0ceb84bb0635684..0000000000000000000000000000000000000000 --- a/spaces/iamironman4279/SadTalker/src/face3d/models/arcface_torch/configs/ms1mv3_r2060.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "arcface" -config.network = "r2060" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 64 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/ms1m-retinaface-t1" -config.num_classes = 93431 -config.num_image = 5179510 -config.num_epoch = 25 -config.warmup_epoch = -1 -config.decay_epoch = [10, 16, 22] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/iamkhadke/chatbot/app.py b/spaces/iamkhadke/chatbot/app.py deleted file mode 100644 index 67ff3ba44128e2df35e16d4e7d22ea25026a366d..0000000000000000000000000000000000000000 --- a/spaces/iamkhadke/chatbot/app.py +++ /dev/null @@ -1,16 +0,0 @@ -from transformers import AutoModelForCausalLM, AutoTokenizer -from instruct_pipeline import InstructionTextGenerationPipeline -import torch -tokenizer = AutoTokenizer.from_pretrained("databricks/dolly-v2-3b", padding_side="left") -model = AutoModelForCausalLM.from_pretrained("databricks/dolly-v2-3b", torch_dtype=torch.bfloat16) - -generate_text = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer) - -import gradio as gr - -def greet(question): - return generate_text(question) - - -iface = 
gr.Interface(fn=greet, inputs="text", outputs="text") -iface.launch() \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Microsoft Toolkit 2.6.4 For Windows And Office Activation __EXCLUSIVE__.md b/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Microsoft Toolkit 2.6.4 For Windows And Office Activation __EXCLUSIVE__.md deleted file mode 100644 index ea1546bfd35af8d9531fec797ee2fc0e87febed9..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Microsoft Toolkit 2.6.4 For Windows And Office Activation __EXCLUSIVE__.md +++ /dev/null @@ -1,6 +0,0 @@ -

      CRACK Microsoft Toolkit 2.6.4 for Windows and Office Activation


      Downloadhttps://urlin.us/2uEx7R



      -
      -Microsoft Toolkit 2.6.8 Activator employs the features for the management of licensing, deploying as well as activating both Microsoft Office and Windows. 1fdad05405
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Positive Grid - BIAS FX Desktop 1.6.6.3633 (VST AAX) X64 BETTER.md b/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Positive Grid - BIAS FX Desktop 1.6.6.3633 (VST AAX) X64 BETTER.md deleted file mode 100644 index 355af2b8d9e9457956d423d3f8bb171d7bcc264a..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/CRACK Positive Grid - BIAS FX Desktop 1.6.6.3633 (VST AAX) X64 BETTER.md +++ /dev/null @@ -1,6 +0,0 @@ -

      CRACK Positive Grid - BIAS FX Desktop 1.6.6.3633 (VST, AAX) x64


      DOWNLOAD ✪✪✪ https://urlin.us/2uEw5v



      - -Cisco network magic pro 5.5.9195 serial numbers, cracks and keygens ... CRACK Positive Grid - BIAS FX Desktop 1.6.6.3633 (VST, AAX) x64 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Driver Thinkcentre Daytona A Rev 33.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Driver Thinkcentre Daytona A Rev 33.md deleted file mode 100644 index fd96e9acb9083a1932d195d3ac1ab0bc81304da9..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Driver Thinkcentre Daytona A Rev 33.md +++ /dev/null @@ -1,135 +0,0 @@ -
      -

      Driver Thinkcentre Daytona A Rev 33: What You Need to Know

      - -

      If you are looking for a driver for your Lenovo Thinkcentre Daytona A Rev 33 motherboard, you might have a hard time finding it online. This is because this motherboard is an OEM product that was customized for specific models of Lenovo desktop computers. Therefore, the driver for this motherboard is not available on the official Lenovo website or other driver download sites.

      -

      Driver Thinkcentre Daytona A Rev 33


      DOWNLOADhttps://urlin.us/2uEvk2



      - -

      However, this does not mean that you cannot use your Thinkcentre Daytona A Rev 33 motherboard with Windows. You just need to find the right driver that matches your system requirements and specifications. In this article, we will show you how to do that and where to download the driver thinkcentre daytona a rev 33 for free.

      - -

      How to Find the Right Driver Thinkcentre Daytona A Rev 33

      - -

      To find the right driver thinkcentre daytona a rev 33 for your PC, you need to follow these steps:

      - -
        -
      1. Identify the model and serial number of your Lenovo desktop computer. You can find this information on a sticker on the back or side of your PC or on the BIOS screen.
      2. -
      3. Go to the Lenovo support website and enter your model and serial number in the search box. This will take you to the product page of your PC.
      4. -
      5. On the product page, click on Drivers & Software and select your operating system (Windows 7, Windows 8, Windows 10, etc.).
      6. -
      7. Look for the Audio driver category and download the latest version of the Realtek HD Audio Driver. This is the driver that supports the Thinkcentre Daytona A Rev 33 motherboard.
      8. -
      9. Save the downloaded file on your PC and run it as an administrator. Follow the instructions to install the driver thinkcentre daytona a rev 33 on your PC.
      10. -
      11. Restart your PC and check if your audio device is working properly.
      12. -
      - -

      If you cannot find the Realtek HD Audio Driver on the Lenovo support website or if it does not work for your PC, you can try another method to find the driver thinkcentre daytona a rev 33.

      -

      - -

      How to Download Driver Thinkcentre Daytona A Rev 33 from Third-Party Sites

      - -

      If you cannot find or install the driver thinkcentre daytona a rev 33 from the Lenovo support website, you can try downloading it from third-party sites that offer driver downloads for various devices. However, you need to be careful when using these sites as some of them may contain malware or viruses that can harm your PC.

      - -

      To download driver thinkcentre daytona a rev 33 from third-party sites, you need to follow these steps:

      - -
        -
      1. Go to a reputable and trusted driver download site, such as DriverGuide, DriversCollection, or DriverIdentifier.
      2. -
      3. Search for driver thinkcentre daytona a rev 33 or Lenovo Thinkcentre Daytona A Rev 33 motherboard in the search box.
      4. -
      5. Choose the driver that matches your system requirements and specifications. Make sure it is compatible with your operating system and has positive reviews and ratings from other users.
      6. -
      7. Download the driver file on your PC and scan it with an antivirus software before opening it.
      8. -
      9. Run the driver file as an administrator and follow the instructions to install it on your PC.
      10. -
      11. Restart your PC and check if your audio device is working properly.
      12. -
      - -

      If you still have problems or errors with your audio device after installing driver thinkcentre daytona a rev 33, you can contact Lenovo customer support or visit their forum for more help and guidance.

      - -

      Conclusion

      - -

      Driver thinkcentre daytona a rev 33 is a driver that supports the Lenovo Thinkcentre Daytona A Rev 33 motherboard, which is an OEM product that was customized for specific models of Lenovo desktop computers. You can find and download this driver from the Lenovo support website or from third-party sites that offer driver downloads for various devices. However, you need to be careful when using these sites as some of them may contain malware or viruses that can harm your PC.

      - -

      In this article, we have shown you how to find and download driver thinkcentre daytona a rev 33 for free and how to install it on your PC. We hope you have found this article helpful and informative.

      - -

      If you have any questions or feedback about driver thinkcentre daytona a rev 33, feel free to leave a comment below or contact us through our website.

      -

      How to Update Driver Thinkcentre Daytona A Rev 33

      - -

      If you want to update driver thinkcentre daytona a rev 33 to the latest version or fix any issues or bugs, you can do these things:

      - -
        -
      1. Go to the Device Manager and find your audio device under the Sound, video and game controllers category.
      2. -
      3. Right-click on your audio device and select Update driver.
      4. -
      5. Choose Search automatically for updated driver software and wait for Windows to find and install the latest driver thinkcentre daytona a rev 33 for your PC.
      6. -
      7. Restart your PC and check if your audio device is working properly.
      8. -
      - -

      If Windows cannot find or install the latest driver thinkcentre daytona a rev 33 for your PC, you can try another method to update it.

      - -

      How to Download Driver Thinkcentre Daytona A Rev 33 from Lenovo Support Website

      - -

      If you want to download driver thinkcentre daytona a rev 33 from the Lenovo support website, you can follow these steps:

      - -
        -
      1. Go to the Lenovo support website and enter your model and serial number in the search box. This will take you to the product page of your PC.
      2. -
      3. On the product page, click on Drivers & Software and select your operating system (Windows 7, Windows 8, Windows 10, etc.).
      4. -
      5. Look for the Audio driver category and download the latest version of the Realtek HD Audio Driver. This is the driver that supports the Thinkcentre Daytona A Rev 33 motherboard.
      6. -
      7. Save the downloaded file on your PC and run it as an administrator. Follow the instructions to install driver thinkcentre daytona a rev 33 on your PC.
      8. -
      9. Restart your PC and check if your audio device is working properly.
      10. -
      - -

      This is the best way to download driver thinkcentre daytona a rev 33 from the Lenovo support website as it ensures that you get the official and compatible driver for your PC.

      -

      How to Backup Driver Thinkcentre Daytona A Rev 33

      - -

      If you want to backup driver thinkcentre daytona a rev 33 for future use or restore it in case of any problems or errors, you can do these things:

      - -
        -
      1. Go to the Device Manager and find your audio device under the Sound, video and game controllers category.
      2. -
      3. Right-click on your audio device and select Properties.
      4. -
      5. Go to the Driver tab and click on Driver Details.
      6. -
      7. Note down the name and location of the driver file (usually something like C:\\Windows\\System32\\drivers\\RTKVHD64.sys).
      8. -
      9. Copy the driver file to a safe location, such as a USB flash drive or an external hard drive.
      10. -
      11. If you need to restore driver thinkcentre daytona a rev 33, you can copy the driver file back to its original location and restart your PC.
      12. -
      - -

      This is a simple way to backup driver thinkcentre daytona a rev 33 for future use or restore it in case of any problems or errors.

      - -

      How to Uninstall Driver Thinkcentre Daytona A Rev 33

      - -

      If you want to uninstall driver thinkcentre daytona a rev 33 from your PC, you can follow these steps:

      - -
        -
      1. Go to the Control Panel and click on Programs and Features.
      2. -
      3. Find Realtek HD Audio Driver in the list of installed programs and click on Uninstall.
      4. -
      5. Follow the instructions to complete the uninstallation process.
      6. -
      7. Delete the driver file from its original location if you still have it.
      8. -
      - -

      This is how you can uninstall driver thinkcentre daytona a rev 33 from your PC. However, you should be careful when doing this as it may affect your audio device or other devices that use the same driver.

      -

      How to Fix Driver Thinkcentre Daytona A Rev 33 Errors

      - -

      Sometimes, you may encounter some errors or issues with driver thinkcentre daytona a rev 33 that may affect your audio device or other devices that use the same driver. Here are some common errors and how to fix them:

      - -
        -
      • Error code 10: This means that the driver thinkcentre daytona a rev 33 is not installed properly or is corrupted. To fix this, you can try to update or reinstall the driver thinkcentre daytona a rev 33 from the Lenovo support website or from third-party sites.
      • -
      • Error code 28: This means that the driver thinkcentre daytona a rev 33 is not installed on your PC. To fix this, you can try to install the driver thinkcentre daytona a rev 33 from the Lenovo support website or from third-party sites.
      • -
      • Error code 39: This means that the driver thinkcentre daytona a rev 33 is missing or corrupted. To fix this, you can try to delete the driver file from its original location and reinstall the driver thinkcentre daytona a rev 33 from the Lenovo support website or from third-party sites.
      • -
      • Error code 52: This means that the driver thinkcentre daytona a rev 33 is not digitally signed or verified by Windows. To fix this, you can try to disable the driver signature enforcement on your PC and reinstall the driver thinkcentre daytona a rev 33 from the Lenovo support website or from third-party sites.
      • -
      - -

      These are some of the common errors and how to fix them with driver thinkcentre daytona a rev 33. However, if you still have errors or issues with your audio device or other devices that use the same driver, you can contact Lenovo customer support or visit their forum for more help and guidance.

      - -

      How to Optimize Driver Thinkcentre Daytona A Rev 33 for Better Performance

      - -

      If you want to optimize driver thinkcentre daytona a rev 33 for better performance and sound quality, you can do these things:

      - -
        -
      1. Adjust the audio settings on your PC according to your preferences and needs. You can use the Realtek HD Audio Manager or the Windows Sound Settings to change the volume, balance, equalizer, and other options.
      2. -
      3. Update driver thinkcentre daytona a rev 33 regularly to get the latest features and improvements. You can use the Device Manager or the Lenovo support website to check for updates and install them on your PC.
      4. -
      5. Clean up your PC and remove any unnecessary files or programs that may slow down your PC or interfere with driver thinkcentre daytona a rev 33. You can use a disk cleanup tool or an uninstaller software to do this.
      6. -
      7. Scan your PC for any malware or viruses that may harm your PC or affect driver thinkcentre daytona a rev 33. You can use an antivirus software or an anti-malware software to do this.
      8. -
      - -

      These are some of the ways to optimize driver thinkcentre daytona a rev 33 for better performance and sound quality. However, you should also be careful not to overdo it or change any settings that may damage your PC or audio device.

      -

      Conclusion

      - -

      Driver thinkcentre daytona a rev 33 is a driver that supports the Lenovo Thinkcentre Daytona A Rev 33 motherboard, which is an OEM product that was customized for specific models of Lenovo desktop computers. You can find and download this driver from the Lenovo support website or from third-party sites that offer driver downloads for various devices. However, you need to be careful when using these sites as some of them may contain malware or viruses that can harm your PC.

      - -

      In this article, we have shown you how to find, download, install, update, backup, uninstall, troubleshoot, and optimize driver thinkcentre daytona a rev 33 for your PC. We have also explained some of the features and benefits of using driver thinkcentre daytona a rev 33 for your audio device and other devices that use the same driver. We hope you have found this article helpful and informative.

      - -

      If you have any questions or feedback about driver thinkcentre daytona a rev 33, feel free to leave a comment below or contact us through our website.

      3cee63e6c2
      -
      -
      \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (The Chronicles Of Narnia 3 In Hindi ).md b/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (The Chronicles Of Narnia 3 In Hindi ).md deleted file mode 100644 index 076d56a092e9e31066741fe0b62c153aaa9bc0e3..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (The Chronicles Of Narnia 3 In Hindi ).md +++ /dev/null @@ -1,6 +0,0 @@ -

      HD Online Player (The Chronicles Of Narnia 3 In Hindi )


      Download File » https://urlin.us/2uEwT7



      -
      -Spiral: From the Book of Saw - HEVC/MKV 4K Ultra HD Teaser Trailer (HEVC) ... Captain Marvel 2019 HDTS Dual Audio [Hindi + English] 720p x264 AAC ... a mp4 as pretty much every bluray player out there supports playback of mp4 (a lot ... (2011) The Chronicles Of Narnia :The Voyage Of The Dawn Treader (2010) The ... 4d29de3e1b

      diff --git a/spaces/inreVtussa/clothingai/Examples/Autodesk 3ds Max 2011 !!EXCLUSIVE!! Xforce Keygen 12.md b/spaces/inreVtussa/clothingai/Examples/Autodesk 3ds Max 2011 !!EXCLUSIVE!! Xforce Keygen 12.md deleted file mode 100644 index c099d383bb1bfe4dcfbc35fdf9a49b27b28109ad..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/Autodesk 3ds Max 2011 !!EXCLUSIVE!! Xforce Keygen 12.md +++ /dev/null @@ -1,9 +0,0 @@ -

      autodesk 3ds max 2011 xforce keygen 12


      Download File » https://tiurll.com/2uCjBJ



      - -Oct 29 2015 - 2; 0x0108 1; 16035; 2011 1; 2016 1; 2017 1; 2018 1; 2018.2 1; 2020 3; 2020-2021 version 2; 2021 2; 3D design 1; 3d max 2; 3Ds Max 4. 0 2; 4.0. -4 ; 2018-2018; 2019-2019; 2020; 2021; 2022; 2023; 2024; 2025; 2026; 2027; 2028; 2029; 2030; 2031. -3D design 2 ; 3DS MAX 5.0; 3DS MAX 6.0; 3DS MAX 7.0; 3DS MAX 8.0; 3DS MAX 9.0; 3DS MAX 10; 3DS MAX 16; 3DS MAX 2018; 3DS MAX 2019; 3DS MAX 2020; 3DS MAX 2020; 3DS Max 2021. -5D; 5DS MAX; 5DS MAX 9; 5DS MAX 10 ; 5DS MAX 11; 5DS MAX 12; 5DS MAX 13; 5DS MAX 14; 5DS MAX 15; 5DS MAX 2016; 5DS MAX 8a78ff9644

      diff --git a/spaces/jatinshah/hn-search/app.py b/spaces/jatinshah/hn-search/app.py deleted file mode 100644 index 72304bf3fc46e73d2e30dc307c27b3350193717a..0000000000000000000000000000000000000000 --- a/spaces/jatinshah/hn-search/app.py +++ /dev/null @@ -1,41 +0,0 @@ -import streamlit as st -import datetime -import humanize -import streamlit.components.v1 as components - -from query import get_results - -search_query = st.sidebar.text_input('Query', max_chars=30, value='best databases') -score = st.sidebar.number_input('#Upvotes', value=5, max_value=1000) -comments = st.sidebar.number_input('#Comments', value=2, max_value=1000) -n_results = st.sidebar.number_input('#Results', value=10, max_value=50) -since = st.sidebar.date_input('Since', value=datetime.date(2000, 1, 1)) - -st.header('AskHN Search') -dt = datetime.datetime( - year=since.year, - month=since.month, - day=since.day, -) -ts = dt.timestamp() - -results = get_results(search_query, score, comments, n_results, ts) - -results_html = ''' - -''' -results_html += '
        ' - -for i, c in enumerate(results): - ts = datetime.datetime.fromtimestamp(c['metadata']['timestamp']).strftime("%Y-%m-%d") - link_html = f"{c['metadata']['title']}" - results_html += f"
      1. {link_html}
      2. " - ts_humanize = humanize.naturaltime(datetime.datetime.fromtimestamp(c['metadata']['timestamp'])) - link_metadata_html = f"
        {c['metadata']['score']} points by {c['metadata']['author']} {ts_humanize} ago | {c['metadata']['comments']} comments
        " - results_html += link_metadata_html - -results_html += '''
      - -''' -print(results_html) -st.markdown(results_html, unsafe_allow_html=True) diff --git a/spaces/jbetker/tortoise/models/vocoder.py b/spaces/jbetker/tortoise/models/vocoder.py deleted file mode 100644 index d38fb56699c035b3d4a86ace67c567d3f1d51fa9..0000000000000000000000000000000000000000 --- a/spaces/jbetker/tortoise/models/vocoder.py +++ /dev/null @@ -1,325 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - -MAX_WAV_VALUE = 32768.0 - -class KernelPredictor(torch.nn.Module): - ''' Kernel predictor for the location-variable convolutions''' - - def __init__( - self, - cond_channels, - conv_in_channels, - conv_out_channels, - conv_layers, - conv_kernel_size=3, - kpnet_hidden_channels=64, - kpnet_conv_size=3, - kpnet_dropout=0.0, - kpnet_nonlinear_activation="LeakyReLU", - kpnet_nonlinear_activation_params={"negative_slope": 0.1}, - ): - ''' - Args: - cond_channels (int): number of channel for the conditioning sequence, - conv_in_channels (int): number of channel for the input sequence, - conv_out_channels (int): number of channel for the output sequence, - conv_layers (int): number of layers - ''' - super().__init__() - - self.conv_in_channels = conv_in_channels - self.conv_out_channels = conv_out_channels - self.conv_kernel_size = conv_kernel_size - self.conv_layers = conv_layers - - kpnet_kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers # l_w - kpnet_bias_channels = conv_out_channels * conv_layers # l_b - - self.input_conv = nn.Sequential( - nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)), - getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), - ) - - self.residual_convs = nn.ModuleList() - padding = (kpnet_conv_size - 1) // 2 - for _ in range(3): - self.residual_convs.append( - nn.Sequential( - nn.Dropout(kpnet_dropout), - nn.utils.weight_norm( - nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding, - bias=True)), - getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), - nn.utils.weight_norm( - nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding, - bias=True)), - getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params), - ) - ) - self.kernel_conv = nn.utils.weight_norm( - nn.Conv1d(kpnet_hidden_channels, kpnet_kernel_channels, kpnet_conv_size, padding=padding, bias=True)) - self.bias_conv = nn.utils.weight_norm( - nn.Conv1d(kpnet_hidden_channels, kpnet_bias_channels, kpnet_conv_size, padding=padding, bias=True)) - - def forward(self, c): - ''' - Args: - c (Tensor): the conditioning sequence (batch, cond_channels, cond_length) - ''' - batch, _, cond_length = c.shape - c = self.input_conv(c) - for residual_conv in self.residual_convs: - residual_conv.to(c.device) - c = c + residual_conv(c) - k = self.kernel_conv(c) - b = self.bias_conv(c) - kernels = k.contiguous().view( - batch, - self.conv_layers, - self.conv_in_channels, - self.conv_out_channels, - self.conv_kernel_size, - cond_length, - ) - bias = b.contiguous().view( - batch, - self.conv_layers, - self.conv_out_channels, - cond_length, - ) - - return kernels, bias - - def remove_weight_norm(self): - nn.utils.remove_weight_norm(self.input_conv[0]) - nn.utils.remove_weight_norm(self.kernel_conv) - nn.utils.remove_weight_norm(self.bias_conv) - for block in self.residual_convs: - nn.utils.remove_weight_norm(block[1]) - nn.utils.remove_weight_norm(block[3]) - - -class 
LVCBlock(torch.nn.Module): - '''the location-variable convolutions''' - - def __init__( - self, - in_channels, - cond_channels, - stride, - dilations=[1, 3, 9, 27], - lReLU_slope=0.2, - conv_kernel_size=3, - cond_hop_length=256, - kpnet_hidden_channels=64, - kpnet_conv_size=3, - kpnet_dropout=0.0, - ): - super().__init__() - - self.cond_hop_length = cond_hop_length - self.conv_layers = len(dilations) - self.conv_kernel_size = conv_kernel_size - - self.kernel_predictor = KernelPredictor( - cond_channels=cond_channels, - conv_in_channels=in_channels, - conv_out_channels=2 * in_channels, - conv_layers=len(dilations), - conv_kernel_size=conv_kernel_size, - kpnet_hidden_channels=kpnet_hidden_channels, - kpnet_conv_size=kpnet_conv_size, - kpnet_dropout=kpnet_dropout, - kpnet_nonlinear_activation_params={"negative_slope": lReLU_slope} - ) - - self.convt_pre = nn.Sequential( - nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm(nn.ConvTranspose1d(in_channels, in_channels, 2 * stride, stride=stride, - padding=stride // 2 + stride % 2, output_padding=stride % 2)), - ) - - self.conv_blocks = nn.ModuleList() - for dilation in dilations: - self.conv_blocks.append( - nn.Sequential( - nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm(nn.Conv1d(in_channels, in_channels, conv_kernel_size, - padding=dilation * (conv_kernel_size - 1) // 2, dilation=dilation)), - nn.LeakyReLU(lReLU_slope), - ) - ) - - def forward(self, x, c): - ''' forward propagation of the location-variable convolutions. - Args: - x (Tensor): the input sequence (batch, in_channels, in_length) - c (Tensor): the conditioning sequence (batch, cond_channels, cond_length) - - Returns: - Tensor: the output sequence (batch, in_channels, in_length) - ''' - _, in_channels, _ = x.shape # (B, c_g, L') - - x = self.convt_pre(x) # (B, c_g, stride * L') - kernels, bias = self.kernel_predictor(c) - - for i, conv in enumerate(self.conv_blocks): - output = conv(x) # (B, c_g, stride * L') - - k = kernels[:, i, :, :, :, :] # (B, 2 * c_g, c_g, kernel_size, cond_length) - b = bias[:, i, :, :] # (B, 2 * c_g, cond_length) - - output = self.location_variable_convolution(output, k, b, - hop_size=self.cond_hop_length) # (B, 2 * c_g, stride * L'): LVC - x = x + torch.sigmoid(output[:, :in_channels, :]) * torch.tanh( - output[:, in_channels:, :]) # (B, c_g, stride * L'): GAU - - return x - - def location_variable_convolution(self, x, kernel, bias, dilation=1, hop_size=256): - ''' perform location-variable convolution operation on the input sequence (x) using the local convolution kernl. - Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), test on NVIDIA V100. - Args: - x (Tensor): the input sequence (batch, in_channels, in_length). - kernel (Tensor): the local convolution kernel (batch, in_channel, out_channels, kernel_size, kernel_length) - bias (Tensor): the bias for the local convolution (batch, out_channels, kernel_length) - dilation (int): the dilation of convolution. - hop_size (int): the hop_size of the conditioning sequence. - Returns: - (Tensor): the output sequence after performing local convolution. (batch, out_channels, in_length). 
- ''' - batch, _, in_length = x.shape - batch, _, out_channels, kernel_size, kernel_length = kernel.shape - assert in_length == (kernel_length * hop_size), "length of (x, kernel) is not matched" - - padding = dilation * int((kernel_size - 1) / 2) - x = F.pad(x, (padding, padding), 'constant', 0) # (batch, in_channels, in_length + 2*padding) - x = x.unfold(2, hop_size + 2 * padding, hop_size) # (batch, in_channels, kernel_length, hop_size + 2*padding) - - if hop_size < dilation: - x = F.pad(x, (0, dilation), 'constant', 0) - x = x.unfold(3, dilation, - dilation) # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation) - x = x[:, :, :, :, :hop_size] - x = x.transpose(3, 4) # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation) - x = x.unfold(4, kernel_size, 1) # (batch, in_channels, kernel_length, dilation, _, kernel_size) - - o = torch.einsum('bildsk,biokl->bolsd', x, kernel) - o = o.to(memory_format=torch.channels_last_3d) - bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d) - o = o + bias - o = o.contiguous().view(batch, out_channels, -1) - - return o - - def remove_weight_norm(self): - self.kernel_predictor.remove_weight_norm() - nn.utils.remove_weight_norm(self.convt_pre[1]) - for block in self.conv_blocks: - nn.utils.remove_weight_norm(block[1]) - - -class UnivNetGenerator(nn.Module): - """UnivNet Generator""" - - def __init__(self, noise_dim=64, channel_size=32, dilations=[1,3,9,27], strides=[8,8,4], lReLU_slope=.2, kpnet_conv_size=3, - # Below are MEL configurations options that this generator requires. - hop_length=256, n_mel_channels=100): - super(UnivNetGenerator, self).__init__() - self.mel_channel = n_mel_channels - self.noise_dim = noise_dim - self.hop_length = hop_length - channel_size = channel_size - kpnet_conv_size = kpnet_conv_size - - self.res_stack = nn.ModuleList() - hop_length = 1 - for stride in strides: - hop_length = stride * hop_length - self.res_stack.append( - LVCBlock( - channel_size, - n_mel_channels, - stride=stride, - dilations=dilations, - lReLU_slope=lReLU_slope, - cond_hop_length=hop_length, - kpnet_conv_size=kpnet_conv_size - ) - ) - - self.conv_pre = \ - nn.utils.weight_norm(nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode='reflect')) - - self.conv_post = nn.Sequential( - nn.LeakyReLU(lReLU_slope), - nn.utils.weight_norm(nn.Conv1d(channel_size, 1, 7, padding=3, padding_mode='reflect')), - nn.Tanh(), - ) - - def forward(self, c, z): - ''' - Args: - c (Tensor): the conditioning sequence of mel-spectrogram (batch, mel_channels, in_length) - z (Tensor): the noise sequence (batch, noise_dim, in_length) - - ''' - z = self.conv_pre(z) # (B, c_g, L) - - for res_block in self.res_stack: - res_block.to(z.device) - z = res_block(z, c) # (B, c_g, L * s_0 * ... 
* s_i) - - z = self.conv_post(z) # (B, 1, L * 256) - - return z - - def eval(self, inference=False): - super(UnivNetGenerator, self).eval() - # don't remove weight norm while validation in training loop - if inference: - self.remove_weight_norm() - - def remove_weight_norm(self): - print('Removing weight norm...') - - nn.utils.remove_weight_norm(self.conv_pre) - - for layer in self.conv_post: - if len(layer.state_dict()) != 0: - nn.utils.remove_weight_norm(layer) - - for res_block in self.res_stack: - res_block.remove_weight_norm() - - def inference(self, c, z=None): - # pad input mel with zeros to cut artifact - # see https://github.com/seungwonpark/melgan/issues/8 - zero = torch.full((c.shape[0], self.mel_channel, 10), -11.5129).to(c.device) - mel = torch.cat((c, zero), dim=2) - - if z is None: - z = torch.randn(c.shape[0], self.noise_dim, mel.size(2)).to(mel.device) - - audio = self.forward(mel, z) - audio = audio[:, :, :-(self.hop_length * 10)] - audio = audio.clamp(min=-1, max=1) - return audio - - -if __name__ == '__main__': - model = UnivNetGenerator() - - c = torch.randn(3, 100, 10) - z = torch.randn(3, 64, 10) - print(c.shape) - - y = model(c, z) - print(y.shape) - assert y.shape == torch.Size([3, 1, 2560]) - - pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) - print(pytorch_total_params) diff --git a/spaces/jerpint/buster/generate_embeddings.py b/spaces/jerpint/buster/generate_embeddings.py deleted file mode 100644 index 476aaaa981866da9760ea088df37cc345c207ef4..0000000000000000000000000000000000000000 --- a/spaces/jerpint/buster/generate_embeddings.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import zipfile -import requests -import pandas as pd -import time - -from buster.documents_manager import DeepLakeDocumentsManager - -from buster.docparser import get_all_documents -from buster.parser import HuggingfaceParser - -hf_transformers_zip_url = "https://huggingface.co/datasets/hf-doc-build/doc-build/resolve/main/transformers/main.zip" - - -def download_and_unzip(zip_url, target_dir, overwrite=False): - """Download a zip file from zip_url and unzip it to target_dir. - - # Example usage - zip_url = "https://example.com/example.zip" - target_dir = "downloaded_files" - download_and_unzip(zip_url, target_dir, overwrite=True) - - ChatGPT generated. - """ - # Create the target directory if it doesn't exist - if not os.path.exists(target_dir): - os.makedirs(target_dir) - - # Get the filename from the zip_url - zip_filename = os.path.basename(zip_url) - target_path = os.path.join(target_dir, zip_filename) - - # Check if the file already exists - if os.path.exists(target_path) and not overwrite: - print(f"{zip_filename} already exists in the target directory.") - return - - # Download the zip file - response = requests.get(zip_url, stream=True) - if response.status_code == 200: - with open(target_path, "wb") as file: - for chunk in response.iter_content(chunk_size=8192): - file.write(chunk) - print(f"{zip_filename} downloaded successfully.") - - # Unzip the file - with zipfile.ZipFile(target_path, "r") as zip_ref: - zip_ref.extractall(target_dir) - print(f"{zip_filename} extracted successfully.") - else: - print(f"Failed to download {zip_filename}. 
Status code: {response.status_code}") - - -# Download the tranformers html pages and unzip it -download_and_unzip(zip_url=hf_transformers_zip_url, target_dir=".") - -# Extract all documents from the html into a dataframe -df = get_all_documents( - root_dir="transformers/main/en/", - base_url="https://huggingface.co/docs/transformers/main/en/", - parser_cls=HuggingfaceParser, - min_section_length=100, - max_section_length=1000, -) - -# Add the source column -df["source"] = "hf_transformers" - -# Save the .csv with chunks to disk -df.to_csv("hf_transformers.csv") - -# Initialize the vector store -dm = DeepLakeDocumentsManager( - vector_store_path="deeplake_store", - overwrite=True, - required_columns=["url", "content", "source", "title"], -) - -# Add all embeddings to the vector store -dm.batch_add( - df=df, - batch_size=3000, - min_time_interval=60, - num_workers=32, - csv_filename="embeddings.csv", - csv_overwrite=False, -) diff --git a/spaces/jessica6105/Lu-Bert-VITS2/transforms.py b/spaces/jessica6105/Lu-Bert-VITS2/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/jessica6105/Lu-Bert-VITS2/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - 
unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = 
torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/jhwen/bingo/postcss.config.js b/spaces/jhwen/bingo/postcss.config.js deleted file mode 100644 index 33ad091d26d8a9dc95ebdf616e217d985ec215b8..0000000000000000000000000000000000000000 --- a/spaces/jhwen/bingo/postcss.config.js +++ /dev/null @@ -1,6 +0,0 @@ -module.exports = { - plugins: { - tailwindcss: {}, - autoprefixer: {}, - }, -} diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_ctr.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_ctr.py deleted file mode 100644 index 81f2be9c3592f0fd366133e54dc2888f8ceef22d..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_ctr.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Cipher/mode_ctr.py : CTR mode -# -# =================================================================== -# The contents of this file are dedicated to the public domain. To -# the extent that dedication to the public domain is not available, -# everyone is granted a worldwide, perpetual, royalty-free, -# non-exclusive license to exercise all rights associated with the -# contents of this file for any purpose whatsoever. -# No rights are reserved. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS -# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN -# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# =================================================================== - -""" -Counter (CTR) mode. 
-""" - -__all__ = ['CtrMode'] - -import struct - -from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer, - create_string_buffer, get_raw_buffer, - SmartPointer, c_size_t, c_uint8_ptr, - is_writeable_buffer) - -from Crypto.Random import get_random_bytes -from Crypto.Util.py3compat import _copy_bytes, is_native_int -from Crypto.Util.number import long_to_bytes - -raw_ctr_lib = load_pycryptodome_raw_lib("Crypto.Cipher._raw_ctr", """ - int CTR_start_operation(void *cipher, - uint8_t initialCounterBlock[], - size_t initialCounterBlock_len, - size_t prefix_len, - unsigned counter_len, - unsigned littleEndian, - void **pResult); - int CTR_encrypt(void *ctrState, - const uint8_t *in, - uint8_t *out, - size_t data_len); - int CTR_decrypt(void *ctrState, - const uint8_t *in, - uint8_t *out, - size_t data_len); - int CTR_stop_operation(void *ctrState);""" - ) - - -class CtrMode(object): - """*CounTeR (CTR)* mode. - - This mode is very similar to ECB, in that - encryption of one block is done independently of all other blocks. - - Unlike ECB, the block *position* contributes to the encryption - and no information leaks about symbol frequency. - - Each message block is associated to a *counter* which - must be unique across all messages that get encrypted - with the same key (not just within the same message). - The counter is as big as the block size. - - Counters can be generated in several ways. The most - straightword one is to choose an *initial counter block* - (which can be made public, similarly to the *IV* for the - other modes) and increment its lowest **m** bits by one - (modulo *2^m*) for each block. In most cases, **m** is - chosen to be half the block size. - - See `NIST SP800-38A`_, Section 6.5 (for the mode) and - Appendix B (for how to manage the *initial counter block*). - - .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf - - :undocumented: __init__ - """ - - def __init__(self, block_cipher, initial_counter_block, - prefix_len, counter_len, little_endian): - """Create a new block cipher, configured in CTR mode. - - :Parameters: - block_cipher : C pointer - A smart pointer to the low-level block cipher instance. - - initial_counter_block : bytes/bytearray/memoryview - The initial plaintext to use to generate the key stream. - - It is as large as the cipher block, and it embeds - the initial value of the counter. - - This value must not be reused. - It shall contain a nonce or a random component. - Reusing the *initial counter block* for encryptions - performed with the same key compromises confidentiality. - - prefix_len : integer - The amount of bytes at the beginning of the counter block - that never change. - - counter_len : integer - The length in bytes of the counter embedded in the counter - block. - - little_endian : boolean - True if the counter in the counter block is an integer encoded - in little endian mode. If False, it is big endian. 
- """ - - if len(initial_counter_block) == prefix_len + counter_len: - self.nonce = _copy_bytes(None, prefix_len, initial_counter_block) - """Nonce; not available if there is a fixed suffix""" - - self._state = VoidPointer() - result = raw_ctr_lib.CTR_start_operation(block_cipher.get(), - c_uint8_ptr(initial_counter_block), - c_size_t(len(initial_counter_block)), - c_size_t(prefix_len), - counter_len, - little_endian, - self._state.address_of()) - if result: - raise ValueError("Error %X while instantiating the CTR mode" - % result) - - # Ensure that object disposal of this Python object will (eventually) - # free the memory allocated by the raw library for the cipher mode - self._state = SmartPointer(self._state.get(), - raw_ctr_lib.CTR_stop_operation) - - # Memory allocated for the underlying block cipher is now owed - # by the cipher mode - block_cipher.release() - - self.block_size = len(initial_counter_block) - """The block size of the underlying cipher, in bytes.""" - - self._next = ["encrypt", "decrypt"] - - def encrypt(self, plaintext, output=None): - """Encrypt data with the key and the parameters set at initialization. - - A cipher object is stateful: once you have encrypted a message - you cannot encrypt (or decrypt) another message using the same - object. - - The data to encrypt can be broken up in two or - more pieces and `encrypt` can be called multiple times. - - That is, the statement: - - >>> c.encrypt(a) + c.encrypt(b) - - is equivalent to: - - >>> c.encrypt(a+b) - - This function does not add any padding to the plaintext. - - :Parameters: - plaintext : bytes/bytearray/memoryview - The piece of data to encrypt. - It can be of any length. - :Keywords: - output : bytearray/memoryview - The location where the ciphertext must be written to. - If ``None``, the ciphertext is returned. - :Return: - If ``output`` is ``None``, the ciphertext is returned as ``bytes``. - Otherwise, ``None``. - """ - - if "encrypt" not in self._next: - raise TypeError("encrypt() cannot be called after decrypt()") - self._next = ["encrypt"] - - if output is None: - ciphertext = create_string_buffer(len(plaintext)) - else: - ciphertext = output - - if not is_writeable_buffer(output): - raise TypeError("output must be a bytearray or a writeable memoryview") - - if len(plaintext) != len(output): - raise ValueError("output must have the same length as the input" - " (%d bytes)" % len(plaintext)) - - result = raw_ctr_lib.CTR_encrypt(self._state.get(), - c_uint8_ptr(plaintext), - c_uint8_ptr(ciphertext), - c_size_t(len(plaintext))) - if result: - if result == 0x60002: - raise OverflowError("The counter has wrapped around in" - " CTR mode") - raise ValueError("Error %X while encrypting in CTR mode" % result) - - if output is None: - return get_raw_buffer(ciphertext) - else: - return None - - def decrypt(self, ciphertext, output=None): - """Decrypt data with the key and the parameters set at initialization. - - A cipher object is stateful: once you have decrypted a message - you cannot decrypt (or encrypt) another message with the same - object. - - The data to decrypt can be broken up in two or - more pieces and `decrypt` can be called multiple times. - - That is, the statement: - - >>> c.decrypt(a) + c.decrypt(b) - - is equivalent to: - - >>> c.decrypt(a+b) - - This function does not remove any padding from the plaintext. - - :Parameters: - ciphertext : bytes/bytearray/memoryview - The piece of data to decrypt. - It can be of any length. 
- :Keywords: - output : bytearray/memoryview - The location where the plaintext must be written to. - If ``None``, the plaintext is returned. - :Return: - If ``output`` is ``None``, the plaintext is returned as ``bytes``. - Otherwise, ``None``. - """ - - if "decrypt" not in self._next: - raise TypeError("decrypt() cannot be called after encrypt()") - self._next = ["decrypt"] - - if output is None: - plaintext = create_string_buffer(len(ciphertext)) - else: - plaintext = output - - if not is_writeable_buffer(output): - raise TypeError("output must be a bytearray or a writeable memoryview") - - if len(ciphertext) != len(output): - raise ValueError("output must have the same length as the input" - " (%d bytes)" % len(plaintext)) - - result = raw_ctr_lib.CTR_decrypt(self._state.get(), - c_uint8_ptr(ciphertext), - c_uint8_ptr(plaintext), - c_size_t(len(ciphertext))) - if result: - if result == 0x60002: - raise OverflowError("The counter has wrapped around in" - " CTR mode") - raise ValueError("Error %X while decrypting in CTR mode" % result) - - if output is None: - return get_raw_buffer(plaintext) - else: - return None - - -def _create_ctr_cipher(factory, **kwargs): - """Instantiate a cipher object that performs CTR encryption/decryption. - - :Parameters: - factory : module - The underlying block cipher, a module from ``Crypto.Cipher``. - - :Keywords: - nonce : bytes/bytearray/memoryview - The fixed part at the beginning of the counter block - the rest is - the counter number that gets increased when processing the next block. - The nonce must be such that no two messages are encrypted under the - same key and the same nonce. - - The nonce must be shorter than the block size (it can have - zero length; the counter is then as long as the block). - - If this parameter is not present, a random nonce will be created with - length equal to half the block size. No random nonce shorter than - 64 bits will be created though - you must really think through all - security consequences of using such a short block size. - - initial_value : posive integer or bytes/bytearray/memoryview - The initial value for the counter. If not present, the cipher will - start counting from 0. The value is incremented by one for each block. - The counter number is encoded in big endian mode. - - counter : object - Instance of ``Crypto.Util.Counter``, which allows full customization - of the counter block. This parameter is incompatible to both ``nonce`` - and ``initial_value``. - - Any other keyword will be passed to the underlying block cipher. - See the relevant documentation for details (at least ``key`` will need - to be present). 
- """ - - cipher_state = factory._create_base_cipher(kwargs) - - counter = kwargs.pop("counter", None) - nonce = kwargs.pop("nonce", None) - initial_value = kwargs.pop("initial_value", None) - if kwargs: - raise TypeError("Invalid parameters for CTR mode: %s" % str(kwargs)) - - if counter is not None and (nonce, initial_value) != (None, None): - raise TypeError("'counter' and 'nonce'/'initial_value'" - " are mutually exclusive") - - if counter is None: - # Crypto.Util.Counter is not used - if nonce is None: - if factory.block_size < 16: - raise TypeError("Impossible to create a safe nonce for short" - " block sizes") - nonce = get_random_bytes(factory.block_size // 2) - else: - if len(nonce) >= factory.block_size: - raise ValueError("Nonce is too long") - - # What is not nonce is counter - counter_len = factory.block_size - len(nonce) - - if initial_value is None: - initial_value = 0 - - if is_native_int(initial_value): - if (1 << (counter_len * 8)) - 1 < initial_value: - raise ValueError("Initial counter value is too large") - initial_counter_block = nonce + long_to_bytes(initial_value, counter_len) - else: - if len(initial_value) != counter_len: - raise ValueError("Incorrect length for counter byte string (%d bytes, expected %d)" % - (len(initial_value), counter_len)) - initial_counter_block = nonce + initial_value - - return CtrMode(cipher_state, - initial_counter_block, - len(nonce), # prefix - counter_len, - False) # little_endian - - # Crypto.Util.Counter is used - - # 'counter' used to be a callable object, but now it is - # just a dictionary for backward compatibility. - _counter = dict(counter) - try: - counter_len = _counter.pop("counter_len") - prefix = _counter.pop("prefix") - suffix = _counter.pop("suffix") - initial_value = _counter.pop("initial_value") - little_endian = _counter.pop("little_endian") - except KeyError: - raise TypeError("Incorrect counter object" - " (use Crypto.Util.Counter.new)") - - # Compute initial counter block - words = [] - while initial_value > 0: - words.append(struct.pack('B', initial_value & 255)) - initial_value >>= 8 - words += [b'\x00'] * max(0, counter_len - len(words)) - if not little_endian: - words.reverse() - initial_counter_block = prefix + b"".join(words) + suffix - - if len(initial_counter_block) != factory.block_size: - raise ValueError("Size of the counter block (%d bytes) must match" - " block size (%d)" % (len(initial_counter_block), - factory.block_size)) - - return CtrMode(cipher_state, initial_counter_block, - len(prefix), counter_len, little_endian) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageFilter.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageFilter.py deleted file mode 100644 index 33bc7cc2e30ea9a0f95cc884de151643915848fa..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageFilter.py +++ /dev/null @@ -1,550 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# standard filters -# -# History: -# 1995-11-27 fl Created -# 2002-06-08 fl Added rank and mode filters -# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call -# -# Copyright (c) 1997-2003 by Secret Labs AB. -# Copyright (c) 1995-2002 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. 
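# A hedged usage sketch for the CTR mode implemented in _mode_ctr.py above,
# separate from the ImageFilter module that follows. The docstrings describe
# the counter block as a fixed nonce prefix plus a big-endian counter; through
# the public PyCryptodome API a round-trip looks like this:
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
nonce = get_random_bytes(8)          # fixed prefix of the counter block
enc = AES.new(key, AES.MODE_CTR, nonce=nonce, initial_value=0)
ciphertext = enc.encrypt(b"attack at dawn")

# Decryption needs a fresh cipher object built with the same key, nonce and
# initial counter value; cipher objects are stateful and single-direction.
dec = AES.new(key, AES.MODE_CTR, nonce=nonce, initial_value=0)
assert dec.decrypt(ciphertext) == b"attack at dawn"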
-# -import functools - - -class Filter: - pass - - -class MultibandFilter(Filter): - pass - - -class BuiltinFilter(MultibandFilter): - def filter(self, image): - if image.mode == "P": - msg = "cannot filter palette images" - raise ValueError(msg) - return image.filter(*self.filterargs) - - -class Kernel(BuiltinFilter): - """ - Create a convolution kernel. The current version only - supports 3x3 and 5x5 integer and floating point kernels. - - In the current version, kernels can only be applied to - "L" and "RGB" images. - - :param size: Kernel size, given as (width, height). In the current - version, this must be (3,3) or (5,5). - :param kernel: A sequence containing kernel weights. The kernel will - be flipped vertically before being applied to the image. - :param scale: Scale factor. If given, the result for each pixel is - divided by this value. The default is the sum of the - kernel weights. - :param offset: Offset. If given, this value is added to the result, - after it has been divided by the scale factor. - """ - - name = "Kernel" - - def __init__(self, size, kernel, scale=None, offset=0): - if scale is None: - # default scale is sum of kernel - scale = functools.reduce(lambda a, b: a + b, kernel) - if size[0] * size[1] != len(kernel): - msg = "not enough coefficients in kernel" - raise ValueError(msg) - self.filterargs = size, scale, offset, kernel - - -class RankFilter(Filter): - """ - Create a rank filter. The rank filter sorts all pixels in - a window of the given size, and returns the ``rank``'th value. - - :param size: The kernel size, in pixels. - :param rank: What pixel value to pick. Use 0 for a min filter, - ``size * size / 2`` for a median filter, ``size * size - 1`` - for a max filter, etc. - """ - - name = "Rank" - - def __init__(self, size, rank): - self.size = size - self.rank = rank - - def filter(self, image): - if image.mode == "P": - msg = "cannot filter palette images" - raise ValueError(msg) - image = image.expand(self.size // 2, self.size // 2) - return image.rankfilter(self.size, self.rank) - - -class MedianFilter(RankFilter): - """ - Create a median filter. Picks the median pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Median" - - def __init__(self, size=3): - self.size = size - self.rank = size * size // 2 - - -class MinFilter(RankFilter): - """ - Create a min filter. Picks the lowest pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Min" - - def __init__(self, size=3): - self.size = size - self.rank = 0 - - -class MaxFilter(RankFilter): - """ - Create a max filter. Picks the largest pixel value in a window with the - given size. - - :param size: The kernel size, in pixels. - """ - - name = "Max" - - def __init__(self, size=3): - self.size = size - self.rank = size * size - 1 - - -class ModeFilter(Filter): - """ - Create a mode filter. Picks the most frequent pixel value in a box with the - given size. Pixel values that occur only once or twice are ignored; if no - pixel value occurs more than twice, the original pixel value is preserved. - - :param size: The kernel size, in pixels. - """ - - name = "Mode" - - def __init__(self, size=3): - self.size = size - - def filter(self, image): - return image.modefilter(self.size) - - -class GaussianBlur(MultibandFilter): - """Blurs the image with a sequence of extended box filters, which - approximates a Gaussian kernel. 
For details on accuracy see - - - :param radius: Standard deviation of the Gaussian kernel. - """ - - name = "GaussianBlur" - - def __init__(self, radius=2): - self.radius = radius - - def filter(self, image): - return image.gaussian_blur(self.radius) - - -class BoxBlur(MultibandFilter): - """Blurs the image by setting each pixel to the average value of the pixels - in a square box extending radius pixels in each direction. - Supports float radius of arbitrary size. Uses an optimized implementation - which runs in linear time relative to the size of the image - for any radius value. - - :param radius: Size of the box in one direction. Radius 0 does not blur, - returns an identical image. Radius 1 takes 1 pixel - in each direction, i.e. 9 pixels in total. - """ - - name = "BoxBlur" - - def __init__(self, radius): - if radius < 0: - msg = "radius must be >= 0" - raise ValueError(msg) - self.radius = radius - - def filter(self, image): - return image.box_blur(self.radius) - - -class UnsharpMask(MultibandFilter): - """Unsharp mask filter. - - See Wikipedia's entry on `digital unsharp masking`_ for an explanation of - the parameters. - - :param radius: Blur Radius - :param percent: Unsharp strength, in percent - :param threshold: Threshold controls the minimum brightness change that - will be sharpened - - .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking - - """ # noqa: E501 - - name = "UnsharpMask" - - def __init__(self, radius=2, percent=150, threshold=3): - self.radius = radius - self.percent = percent - self.threshold = threshold - - def filter(self, image): - return image.unsharp_mask(self.radius, self.percent, self.threshold) - - -class BLUR(BuiltinFilter): - name = "Blur" - # fmt: off - filterargs = (5, 5), 16, 0, ( - 1, 1, 1, 1, 1, - 1, 0, 0, 0, 1, - 1, 0, 0, 0, 1, - 1, 0, 0, 0, 1, - 1, 1, 1, 1, 1, - ) - # fmt: on - - -class CONTOUR(BuiltinFilter): - name = "Contour" - # fmt: off - filterargs = (3, 3), 1, 255, ( - -1, -1, -1, - -1, 8, -1, - -1, -1, -1, - ) - # fmt: on - - -class DETAIL(BuiltinFilter): - name = "Detail" - # fmt: off - filterargs = (3, 3), 6, 0, ( - 0, -1, 0, - -1, 10, -1, - 0, -1, 0, - ) - # fmt: on - - -class EDGE_ENHANCE(BuiltinFilter): - name = "Edge-enhance" - # fmt: off - filterargs = (3, 3), 2, 0, ( - -1, -1, -1, - -1, 10, -1, - -1, -1, -1, - ) - # fmt: on - - -class EDGE_ENHANCE_MORE(BuiltinFilter): - name = "Edge-enhance More" - # fmt: off - filterargs = (3, 3), 1, 0, ( - -1, -1, -1, - -1, 9, -1, - -1, -1, -1, - ) - # fmt: on - - -class EMBOSS(BuiltinFilter): - name = "Emboss" - # fmt: off - filterargs = (3, 3), 1, 128, ( - -1, 0, 0, - 0, 1, 0, - 0, 0, 0, - ) - # fmt: on - - -class FIND_EDGES(BuiltinFilter): - name = "Find Edges" - # fmt: off - filterargs = (3, 3), 1, 0, ( - -1, -1, -1, - -1, 8, -1, - -1, -1, -1, - ) - # fmt: on - - -class SHARPEN(BuiltinFilter): - name = "Sharpen" - # fmt: off - filterargs = (3, 3), 16, 0, ( - -2, -2, -2, - -2, 32, -2, - -2, -2, -2, - ) - # fmt: on - - -class SMOOTH(BuiltinFilter): - name = "Smooth" - # fmt: off - filterargs = (3, 3), 13, 0, ( - 1, 1, 1, - 1, 5, 1, - 1, 1, 1, - ) - # fmt: on - - -class SMOOTH_MORE(BuiltinFilter): - name = "Smooth More" - # fmt: off - filterargs = (5, 5), 100, 0, ( - 1, 1, 1, 1, 1, - 1, 5, 5, 5, 1, - 1, 5, 44, 5, 1, - 1, 5, 5, 5, 1, - 1, 1, 1, 1, 1, - ) - # fmt: on - - -class Color3DLUT(MultibandFilter): - """Three-dimensional color lookup table. 
- - Transforms 3-channel pixels using the values of the channels as coordinates - in the 3D lookup table and interpolating the nearest elements. - - This method allows you to apply almost any color transformation - in constant time by using pre-calculated decimated tables. - - .. versionadded:: 5.2.0 - - :param size: Size of the table. One int or tuple of (int, int, int). - Minimal size in any dimension is 2, maximum is 65. - :param table: Flat lookup table. A list of ``channels * size**3`` - float elements or a list of ``size**3`` channels-sized - tuples with floats. Channels are changed first, - then first dimension, then second, then third. - Value 0.0 corresponds lowest value of output, 1.0 highest. - :param channels: Number of channels in the table. Could be 3 or 4. - Default is 3. - :param target_mode: A mode for the result image. Should have not less - than ``channels`` channels. Default is ``None``, - which means that mode wouldn't be changed. - """ - - name = "Color 3D LUT" - - def __init__(self, size, table, channels=3, target_mode=None, **kwargs): - if channels not in (3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - self.size = size = self._check_size(size) - self.channels = channels - self.mode = target_mode - - # Hidden flag `_copy_table=False` could be used to avoid extra copying - # of the table if the table is specially made for the constructor. - copy_table = kwargs.get("_copy_table", True) - items = size[0] * size[1] * size[2] - wrong_size = False - - numpy = None - if hasattr(table, "shape"): - try: - import numpy - except ImportError: # pragma: no cover - pass - - if numpy and isinstance(table, numpy.ndarray): - if copy_table: - table = table.copy() - - if table.shape in [ - (items * channels,), - (items, channels), - (size[2], size[1], size[0], channels), - ]: - table = table.reshape(items * channels) - else: - wrong_size = True - - else: - if copy_table: - table = list(table) - - # Convert to a flat list - if table and isinstance(table[0], (list, tuple)): - table, raw_table = [], table - for pixel in raw_table: - if len(pixel) != channels: - msg = ( - "The elements of the table should " - f"have a length of {channels}." - ) - raise ValueError(msg) - table.extend(pixel) - - if wrong_size or len(table) != items * channels: - msg = ( - "The table should have either channels * size**3 float items " - "or size**3 items of channels-sized tuples with floats. " - f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. " - f"Actual length: {len(table)}" - ) - raise ValueError(msg) - self.table = table - - @staticmethod - def _check_size(size): - try: - _, _, _ = size - except ValueError as e: - msg = "Size should be either an integer or a tuple of three integers." - raise ValueError(msg) from e - except TypeError: - size = (size, size, size) - size = [int(x) for x in size] - for size_1d in size: - if not 2 <= size_1d <= 65: - msg = "Size should be in [2, 65] range." - raise ValueError(msg) - return size - - @classmethod - def generate(cls, size, callback, channels=3, target_mode=None): - """Generates new LUT using provided callback. - - :param size: Size of the table. Passed to the constructor. - :param callback: Function with three parameters which correspond - three color channels. Will be called ``size**3`` - times with values from 0.0 to 1.0 and should return - a tuple with ``channels`` elements. - :param channels: The number of channels which should return callback. 
- :param target_mode: Passed to the constructor of the resulting - lookup table. - """ - size_1d, size_2d, size_3d = cls._check_size(size) - if channels not in (3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - - table = [0] * (size_1d * size_2d * size_3d * channels) - idx_out = 0 - for b in range(size_3d): - for g in range(size_2d): - for r in range(size_1d): - table[idx_out : idx_out + channels] = callback( - r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1) - ) - idx_out += channels - - return cls( - (size_1d, size_2d, size_3d), - table, - channels=channels, - target_mode=target_mode, - _copy_table=False, - ) - - def transform(self, callback, with_normals=False, channels=None, target_mode=None): - """Transforms the table values using provided callback and returns - a new LUT with altered values. - - :param callback: A function which takes old lookup table values - and returns a new set of values. The number - of arguments which function should take is - ``self.channels`` or ``3 + self.channels`` - if ``with_normals`` flag is set. - Should return a tuple of ``self.channels`` or - ``channels`` elements if it is set. - :param with_normals: If true, ``callback`` will be called with - coordinates in the color cube as the first - three arguments. Otherwise, ``callback`` - will be called only with actual color values. - :param channels: The number of channels in the resulting lookup table. - :param target_mode: Passed to the constructor of the resulting - lookup table. - """ - if channels not in (None, 3, 4): - msg = "Only 3 or 4 output channels are supported" - raise ValueError(msg) - ch_in = self.channels - ch_out = channels or ch_in - size_1d, size_2d, size_3d = self.size - - table = [0] * (size_1d * size_2d * size_3d * ch_out) - idx_in = 0 - idx_out = 0 - for b in range(size_3d): - for g in range(size_2d): - for r in range(size_1d): - values = self.table[idx_in : idx_in + ch_in] - if with_normals: - values = callback( - r / (size_1d - 1), - g / (size_2d - 1), - b / (size_3d - 1), - *values, - ) - else: - values = callback(*values) - table[idx_out : idx_out + ch_out] = values - idx_in += ch_in - idx_out += ch_out - - return type(self)( - self.size, - table, - channels=ch_out, - target_mode=target_mode or self.mode, - _copy_table=False, - ) - - def __repr__(self): - r = [ - f"{self.__class__.__name__} from {self.table.__class__.__name__}", - "size={:d}x{:d}x{:d}".format(*self.size), - f"channels={self.channels:d}", - ] - if self.mode: - r.append(f"target_mode={self.mode}") - return "<{}>".format(" ".join(r)) - - def filter(self, image): - from . 
import Image - - return image.color_lut_3d( - self.mode or image.mode, - Image.Resampling.BILINEAR, - self.channels, - self.size[0], - self.size[1], - self.size[2], - self.table, - ) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/expr/consts.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/expr/consts.py deleted file mode 100644 index 974fb06a3c756a7e27106f4d1bb9c17b78a094fd..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/expr/consts.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Dict - -from .core import ConstExpression - - -CONST_LISTING = { - "NaN": "not a number (same as JavaScript literal NaN)", - "LN10": "the natural log of 10 (alias to Math.LN10)", - "E": "the transcendental number e (alias to Math.E)", - "LOG10E": "the base 10 logarithm e (alias to Math.LOG10E)", - "LOG2E": "the base 2 logarithm of e (alias to Math.LOG2E)", - "SQRT1_2": "the square root of 0.5 (alias to Math.SQRT1_2)", - "LN2": "the natural log of 2 (alias to Math.LN2)", - "SQRT2": "the square root of 2 (alias to Math.SQRT1_2)", - "PI": "the transcendental number pi (alias to Math.PI)", -} - -NAME_MAP: Dict[str, str] = {} - - -def _populate_namespace(): - globals_ = globals() - for name, doc in CONST_LISTING.items(): - py_name = NAME_MAP.get(name, name) - globals_[py_name] = ConstExpression(name, doc) - yield py_name - - -__all__ = list(_populate_namespace()) diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssectypes.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssectypes.py deleted file mode 100644 index 02131e0adaeb85eb49351f4953c854023315fab9..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssectypes.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license - -# Copyright (C) 2003-2017 Nominum, Inc. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose with or without fee is hereby granted, -# provided that the above copyright notice and this permission notice -# appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -"""Common DNSSEC-related types.""" - -# This is a separate file to avoid import circularity between dns.dnssec and -# the implementations of the DS and DNSKEY types. 
- -import dns.enum - - -class Algorithm(dns.enum.IntEnum): - RSAMD5 = 1 - DH = 2 - DSA = 3 - ECC = 4 - RSASHA1 = 5 - DSANSEC3SHA1 = 6 - RSASHA1NSEC3SHA1 = 7 - RSASHA256 = 8 - RSASHA512 = 10 - ECCGOST = 12 - ECDSAP256SHA256 = 13 - ECDSAP384SHA384 = 14 - ED25519 = 15 - ED448 = 16 - INDIRECT = 252 - PRIVATEDNS = 253 - PRIVATEOID = 254 - - @classmethod - def _maximum(cls): - return 255 - - -class DSDigest(dns.enum.IntEnum): - """DNSSEC Delegation Signer Digest Algorithm""" - - NULL = 0 - SHA1 = 1 - SHA256 = 2 - GOST = 3 - SHA384 = 4 - - @classmethod - def _maximum(cls): - return 255 - - -class NSEC3Hash(dns.enum.IntEnum): - """NSEC3 hash algorithm""" - - SHA1 = 1 - - @classmethod - def _maximum(cls): - return 255 diff --git a/spaces/jonathang/WeatherBoy/app.py b/spaces/jonathang/WeatherBoy/app.py deleted file mode 100644 index 2967c7d50b0bda649c3814c831e54181893560df..0000000000000000000000000000000000000000 --- a/spaces/jonathang/WeatherBoy/app.py +++ /dev/null @@ -1,339 +0,0 @@ -import requests -import structlog -import openai -import os -import io -import random -import tiktoken -import enum -import time -import retrying -import IPython.display as display -from base64 import b64decode -import base64 -from io import BytesIO -import PIL -import PIL.Image -import PIL.ImageDraw -import PIL.ImageFont -import gradio as gr -import cachetools.func -from huggingface_hub import hf_hub_download -import concurrent.futures -import geopy - - -logger = structlog.getLogger() -weather_api_key = os.environ['WEATHER_API'] -openai.api_key = os.environ.get("OPENAI_KEY", None) - -animals = [x.strip() for x in open('animals.txt').readlines()] -art_styles = [x.strip() for x in open('art_styles.txt').readlines()] -font_path = hf_hub_download("jonathang/fonts-ttf", "Vogue.ttf") -other_font_path = hf_hub_download("ybelkada/fonts", "Arial.TTF") - - -@cachetools.cached(cache={}) -def get_lat_long_gmaps(zip): - api_key = os.environ["GMAPS_API"] - url = f"https://maps.googleapis.com/maps/api/geocode/json?address={zip}&key={api_key}" - resp = requests.get(url).json() - latlng = resp['results'][0]['geometry']['location'] - return latlng['lat'], latlng['lng'] - -@cachetools.cached(cache={}) -def get_lat_long(zip): - try: - loc = geopy.Nominatim(user_agent='weatherboy-gpt').geocode(str(zip)) - return loc.latitude, loc.longitude - except: - return get_lat_long_gmaps(zip) - - -class Chat: - class Model(enum.Enum): - GPT3_5 = "gpt-3.5-turbo" - GPT_4 = "gpt-4" - - def __init__(self, system, max_length=4096//2): - self._system = system - self._max_length = max_length - self._history = [ - {"role": "system", "content": self._system}, - ] - - @classmethod - def num_tokens_from_text(cls, text, model="gpt-3.5-turbo"): - """Returns the number of tokens used by some text.""" - encoding = tiktoken.encoding_for_model(model) - return len(encoding.encode(text)) - - @classmethod - def num_tokens_from_messages(cls, messages, model="gpt-3.5-turbo"): - """Returns the number of tokens used by a list of messages.""" - encoding = tiktoken.encoding_for_model(model) - num_tokens = 0 - for message in messages: - num_tokens += 4 # every message follows {role/name}\n{content}\n - for key, value in message.items(): - num_tokens += len(encoding.encode(value)) - if key == "name": # if there's a name, the role is omitted - num_tokens += -1 # role is always required and always 1 token - num_tokens += 2 # every reply is primed with assistant - return num_tokens - - @retrying.retry(stop_max_attempt_number=5, wait_fixed=2000) - def _msg(self, *args, 
model=Model.GPT3_5.value, **kwargs): - return openai.ChatCompletion.create( - *args, - model=model, - messages=self._history, - **kwargs - ) - - def message(self, next_msg=None, **kwargs): - # TODO: Optimize this if slow through easy caching - while len(self._history) > 1 and self.num_tokens_from_messages(self._history) > self._max_length: - logger.info(f'Popping message: {self._history.pop(1)}') - if next_msg is not None: - self._history.append({"role": "user", "content": next_msg}) - logger.info('requesting openai...') - resp = self._msg(**kwargs) - logger.info('received openai...') - text = resp.choices[0].message.content - self._history.append({"role": "assistant", "content": text}) - return text - -class Weather: - def __init__(self, zip_code='10001', api_key=weather_api_key): - self.zip_code = zip_code - self.api_key = api_key - - @cachetools.func.ttl_cache(maxsize=128, ttl=15*60) - def get_weather(self): - lat, long = get_lat_long(self.zip_code) - url = f"https://forecast.weather.gov/MapClick.php?lat={lat:.2f}&lon={long:.2f}&unit=0&lg=english&FcstType=json" - headers = {'accept': 'application/json'} - return requests.get(url, headers=headers).json() - - def get_info(self): - data = self.get_weather() - new_data = {} - new_data['now'] = data['currentobservation'] - - # The 'time' and 'data' keys seem to have hourly/daily data - # Assuming the first entry in these lists is for the current hour - new_data['hour'] = { - 'time': data['time']['startValidTime'][0], - 'tempLabel': data['time']['tempLabel'][0], - 'temperature': data['data']['temperature'][0], - 'pop': data['data']['pop'][0], - 'weather': data['data']['weather'][0], - 'iconLink': data['data']['iconLink'][0], - 'text': data['data']['text'][0], - } - - # And the rest of the 'time' and 'data' lists are for the rest of the day - new_data['day'] = { - 'time': data['time']['startValidTime'][1:], - 'tempLabel': data['time']['tempLabel'][1:], - 'temperature': data['data']['temperature'][1:], - 'pop': data['data']['pop'][1:], - 'weather': data['data']['weather'][1:], - 'iconLink': data['data']['iconLink'][1:], - 'text': data['data']['text'][1:], - } - - return new_data - - -class Image: - class Size(enum.Enum): - SMALL = "256x256" - MEDIUM = "512x512" - LARGE = "1024x1024" - - @classmethod - @retrying.retry(stop_max_attempt_number=5, wait_fixed=2000) - def create(cls, prompt, n=1, size=Size.SMALL): - logger.info('requesting openai.Image...') - resp = openai.Image.create(prompt=prompt, n=n, size=size.value, response_format='b64_json') - logger.info('received openai.Image...') - if n == 1: return resp["data"][0] - return resp["data"] - -def create_collage(image1, image2, image3, image4): - # assuming images are the same size - width, height = image1.size - - new_img = PIL.Image.new('RGB', (2 * width, 2 * height)) - - # place images in collage image - new_img.paste(image1, (0,0)) - new_img.paste(image2, (width, 0)) - new_img.paste(image3, (0, height)) - new_img.paste(image4, (width, height)) - - return new_img - -def overlay_text_on_image(img, text, position, text_color=(255, 255, 255), box_color=(0, 0, 0, 128), decode=False): - # Convert the base64 string back to an image - if decode: - img_bytes = base64.b64decode(img) - img = PIL.Image.open(BytesIO(img_bytes)) - - # Get image dimensions - img_width, img_height = img.size - - # Create a ImageDraw object - draw = PIL.ImageDraw.Draw(img) - - # Reduce the font size until it fits the image width or height - l, r = 1, 50 - while l < r: - font_size = (l + r) // 2 - font = 
PIL.ImageFont.truetype(font_path, font_size) - left, upper, right, lower = draw.textbbox((0, 0), text, font=font) - text_width = right - left - text_height = lower - upper - if text_width <= img_width and text_height <= img_height: - l = font_size + 1 - else: - r = font_size - 1 - font_size = max(l-1, 1) - - left, upper, right, lower = draw.textbbox((0, 0), text, font=font) - text_width = right - left - text_height = lower - upper - - if position == 'top-left': - x, y = 0, 0 - elif position == 'top-right': - x, y = img_width - text_width, 0 - elif position == 'bottom-left': - x, y = 0, img_height - text_height - elif position == 'bottom-right': - x, y = img_width - text_width, img_height - text_height - else: - raise ValueError("Position should be 'top-left', 'top-right', 'bottom-left' or 'bottom-right'.") - - # Draw a semi-transparent box around the text - draw.rectangle([x, y, x + text_width, y + text_height], fill=box_color) - - # Draw the text on the image - draw.text((x, y), text, font=font, fill=text_color) - - return img - - -class WeatherDraw: - def clean_text(self, weather_info): - chat = Chat("Given the following weather conditions, write a very small, concise plaintext summary that will overlay on top of an image.") - text = chat.message(str(weather_info)) - return text - - def generate_image(self, weather_info, **kwargs): - animal = random.choice(animals) - logger.info(f"Got animal {animal}") - chat = Chat(f'''Given -the following weather conditions, write a plaintext, short, and vivid description of an -image of an adorable anthropomorphised {animal} doing an activity in the weather. -The image should make obvious what the weather is. -The animal should be extremely anthropomorphised. -Only write the short description and nothing else. 
-Do not include specific numbers.'''.replace('\n', ' ')) - description = chat.message(str(weather_info)) - hd_modifiers = """3840x2160 -8k 3D / 16k 3D -8k resolution / 16k resolution -Detailed -Ultra HD -Ultrafine detail -""".split('\n') - prompt = f'{random.choice(art_styles)} of {description} {random.choice(hd_modifiers)}' - logger.info(prompt) - img = Image.create(prompt, **kwargs) - return img["b64_json"], prompt - - def step_one_forecast(self, weather_info, **kwargs): - img, txt = self.generate_image(weather_info, **kwargs) - # text = self.clean_text(weather_info) - # return overlay_text_on_image(img, text, 'bottom-left') - return img, txt - - def weather_img(self, weather_data): - import io - # Create a new image with white background - image = PIL.Image.new('RGB', (256, 256), (255, 255, 255)) - draw = PIL.ImageDraw.Draw(image) - - # Load a font - font = PIL.ImageFont.truetype(other_font_path, 12) - - # Draw text on the image - y_text = 5 - items_to_display = { - 'now': {'Temperature': weather_data['now']['Temp'], - 'Condition': weather_data['now']['Weather'],}, - 'hour': {'Temperature': weather_data['hour']['temperature'], - 'Condition': weather_data['hour']['weather']}, - 'day': {'High': int(max(float(t) for t in weather_data['day']['temperature'])), - 'Low': int(min(float(t) for t in weather_data['day']['temperature'])), - 'Condition': weather_data['day']['weather'][0]}, - } - - for category, values in items_to_display.items(): - draw.text((5, y_text), category, font=font, fill=(0, 0, 0)) - y_text += 15 - for key, value in values.items(): - text = f"{key}: {value}" - draw.text((10, y_text), text, font=font, fill=(0, 0, 0)) - y_text += 15 - - # Download the weather condition icon for now, day and next hour - for index, time in enumerate(items_to_display.keys()): - if time == 'day': - icon_url = weather_data['day']['iconLink'][0] - elif time == 'now': - icon_url = 'https://forecast.weather.gov/newimages/medium/'+weather_data['now']['Weatherimage'] - else: - icon_url = weather_data[time]['iconLink'] - try: - response = requests.get(icon_url) - icon = PIL.Image.open(io.BytesIO(response.content)) - except: - print(time, icon_url) - continue - # Resize the icon - icon = icon.resize((60, 60)) - # Paste the icon on the image - image.paste(icon, (index*70 + 10, 190)) - - return image - - def step(self, zip_code='10001', **kwargs): - forecast = Weather(zip_code).get_info() - images, texts = [], [] - with concurrent.futures.ThreadPoolExecutor(max_workers=4) as e: - runs = {} - for time, data in forecast.items(): - if time == 'etc': continue - runs[e.submit(self.step_one_forecast, data, **kwargs)] = time, data - for r in concurrent.futures.as_completed(runs.keys()): - img, txt = r.result() - time, data = runs[r] - images.append(overlay_text_on_image(img, time, 'top-right', decode=True)) - # images.append(overlay_text_on_image(img, '', 'top-right', decode=True)) - texts.append(txt) - return create_collage(*images, self.weather_img(forecast)), *texts, str(forecast) - - -# Define Gradio interface -iface = gr.Interface(fn=WeatherDraw().step, - inputs=gr.inputs.Textbox(label="Enter Zipcode"), - outputs=[gr.outputs.Image(type='pil'), "text", "text", "text", "text"], - title="US Zipcode Weather", - description="Enter a US Zipcode and get some weather.") - -# Run the interface -iface.launch() \ No newline at end of file diff --git a/spaces/jordonpeter01/MusicGen2/audiocraft/models/loaders.py b/spaces/jordonpeter01/MusicGen2/audiocraft/models/loaders.py deleted file mode 100644 index 
97c662c3212b7695669cbfc5214ff2f099c3f319..0000000000000000000000000000000000000000 --- a/spaces/jordonpeter01/MusicGen2/audiocraft/models/loaders.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Utility functions to load from the checkpoints. -Each checkpoint is a torch.saved dict with the following keys: -- 'xp.cfg': the hydra config as dumped during training. This should be used - to rebuild the object using the audiocraft.models.builders functions, -- 'model_best_state': a readily loadable best state for the model, including - the conditioner. The model obtained from `xp.cfg` should be compatible - with this state dict. In the case of a LM, the encodec model would not be - bundled along but instead provided separately. - -Those functions also support loading from a remote location with the Torch Hub API. -They also support overriding some parameters, in particular the device and dtype -of the returned model. -""" - -from pathlib import Path -from huggingface_hub import hf_hub_download -import typing as tp -import os - -from omegaconf import OmegaConf -import torch - -from . import builders - - -HF_MODEL_CHECKPOINTS_MAP = { - "small": "facebook/musicgen-small", - "medium": "facebook/musicgen-medium", - "large": "facebook/musicgen-large", - "melody": "facebook/musicgen-melody", -} - - -def _get_state_dict( - file_or_url_or_id: tp.Union[Path, str], - filename: tp.Optional[str] = None, - device='cpu', - cache_dir: tp.Optional[str] = None, -): - # Return the state dict either from a file or url - file_or_url_or_id = str(file_or_url_or_id) - assert isinstance(file_or_url_or_id, str) - - if os.path.isfile(file_or_url_or_id): - return torch.load(file_or_url_or_id, map_location=device) - - if os.path.isdir(file_or_url_or_id): - file = f"{file_or_url_or_id}/{filename}" - return torch.load(file, map_location=device) - - elif file_or_url_or_id.startswith('https://'): - return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True) - - elif file_or_url_or_id in HF_MODEL_CHECKPOINTS_MAP: - assert filename is not None, "filename needs to be defined if using HF checkpoints" - - repo_id = HF_MODEL_CHECKPOINTS_MAP[file_or_url_or_id] - file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=cache_dir) - return torch.load(file, map_location=device) - - else: - raise ValueError(f"{file_or_url_or_id} is not a valid name, path or link that can be loaded.") - - -def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None): - pkg = _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir) - cfg = OmegaConf.create(pkg['xp.cfg']) - cfg.device = str(device) - model = builders.get_compression_model(cfg) - model.load_state_dict(pkg['best_state']) - model.eval() - return model - - -def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None): - pkg = _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir) - cfg = OmegaConf.create(pkg['xp.cfg']) - cfg.device = str(device) - if cfg.device == 'cpu': - cfg.dtype = 'float32' - else: - cfg.dtype = 'float16' - model = builders.get_lm_model(cfg) - model.load_state_dict(pkg['best_state']) - model.eval() - model.cfg = cfg - return model diff --git 
a/spaces/jspr/paperchat/ingest_faiss.py b/spaces/jspr/paperchat/ingest_faiss.py deleted file mode 100644 index 696d9eab238d1f37acd0c3bd7c5c312b792d9328..0000000000000000000000000000000000000000 --- a/spaces/jspr/paperchat/ingest_faiss.py +++ /dev/null @@ -1,41 +0,0 @@ -from pathlib import Path -from langchain.text_splitter import CharacterTextSplitter -import faiss -from langchain.vectorstores import FAISS -from langchain.embeddings import OpenAIEmbeddings -import pickle - -def create_vector_store(suffix, paper_text): - # with open('paper-dir/main.txt') as f: - # paper_text = f.read() - - split_chars = ["§", "§.§"] - data = [] - for c in split_chars: - paper_text = paper_text.replace(c, "§") - data = paper_text.split("§") - - # metadatas is the rest of the text on the same line as the section symbol - sources = [] - for d in data: - sources.append(d.split("\n")[0].strip()) - # data = [d.split("\n")[1:] for d in data] - - sources[0] = "Beginning of paper" - - # Here we split the documents, as needed, into smaller chunks. - # We do this due to the context limits of the LLMs. - text_splitter = CharacterTextSplitter(chunk_size=1500, separator="\n") - docs = [] - metadatas = [] - for i, d in enumerate(data): - splits = text_splitter.split_text(d) - docs.extend(splits) - metadatas.extend([{"source": sources[i]}] * len(splits)) - - # Here we create a vector store from the documents and save it to disk. - store = FAISS.from_texts(docs, OpenAIEmbeddings(), metadatas=metadatas) - faiss.write_index(store.index, f"{suffix}/docs.index") - store.index = None - with open(f"{suffix}/faiss_store.pkl", "wb") as f: - pickle.dump(store, f) diff --git a/spaces/jsu27/decomp-diffusion/upsampling.py b/spaces/jsu27/decomp-diffusion/upsampling.py deleted file mode 100644 index 1ca3d631743b0f04111bd0ca0ef62757449b650f..0000000000000000000000000000000000000000 --- a/spaces/jsu27/decomp-diffusion/upsampling.py +++ /dev/null @@ -1,43 +0,0 @@ -import torch as th -from diffusers import IFImg2ImgSuperResolutionPipeline -from transformers import T5EncoderModel -from PIL import Image -import numpy as np - -def get_pipeline(): - - text_encoder = T5EncoderModel.from_pretrained( - "DeepFloyd/IF-I-XL-v1.0", - subfolder="text_encoder", - device_map="auto", - load_in_8bit=True, - variant="8bit" - ) - - pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( - "DeepFloyd/IF-II-L-v1.0", - text_encoder=text_encoder, - variant="fp16", - torch_dtype=th.float16, - device_map="auto", - watermarker=None - ) - return pipe - - - -def upscale_image(im, pipe): - """im is 64x64 PIL image""" - prompt = '' - prompt_embeds, negative_embeds = pipe.encode_prompt(prompt) - generator = th.Generator().manual_seed(0) - - image = pipe( - image=original_image, - original_image=original_image, - prompt_embeds=prompt_embeds, - negative_prompt_embeds=negative_embeds, - generator=generator, - ).images[0] - - return image \ No newline at end of file diff --git a/spaces/juliensimon/table_questions/README.md b/spaces/juliensimon/table_questions/README.md deleted file mode 100644 index 4b3dc97d45106be363b6996c00ad8893cebb17a0..0000000000000000000000000000000000000000 --- a/spaces/juliensimon/table_questions/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Table Questions -emoji: 🐠 -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/justest/gpt4free/g4f/.v1/gpt4free/usesless/__init__.py 
b/spaces/justest/gpt4free/g4f/.v1/gpt4free/usesless/__init__.py deleted file mode 100644 index 00f7f75d0e33d106a514a98fdb20234fbf80d6c2..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/g4f/.v1/gpt4free/usesless/__init__.py +++ /dev/null @@ -1,158 +0,0 @@ -import string -import time -import re -import json -import requests -import fake_useragent -import random -from password_generator import PasswordGenerator - -from .utils import create_email, check_email - - -class Account: - @staticmethod - def create(logging: bool = False): - is_custom_domain = input( - "Do you want to use your custom domain name for temporary email? [Y/n]: " - ).upper() - - if is_custom_domain == "Y": - mail_address = create_email(custom_domain=True, logging=logging) - elif is_custom_domain == "N": - mail_address = create_email(custom_domain=False, logging=logging) - else: - print("Please, enter either Y or N") - return - - name = string.ascii_lowercase + string.digits - username = "".join(random.choice(name) for i in range(20)) - - pwo = PasswordGenerator() - pwo.minlen = 8 - password = pwo.generate() - - session = requests.Session() - - register_url = "https://ai.usesless.com/api/cms/auth/local/register" - register_json = { - "username": username, - "password": password, - "email": mail_address, - } - headers = { - "authority": "ai.usesless.com", - "accept": "application/json, text/plain, */*", - "accept-language": "en-US,en;q=0.5", - "cache-control": "no-cache", - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": fake_useragent.UserAgent().random, - } - register = session.post(register_url, json=register_json, headers=headers) - if logging: - if register.status_code == 200: - print("Registered successfully") - else: - print(register.status_code) - print(register.json()) - print("There was a problem with account registration, try again") - - if register.status_code != 200: - quit() - - while True: - time.sleep(5) - messages = check_email(mail=mail_address, logging=logging) - - # Check if method `message_list()` didn't return None or empty list. - if not messages or len(messages) == 0: - # If it returned None or empty list sleep for 5 seconds to wait for new message. 
- continue - - message_text = messages[0]["content"] - verification_url = re.findall( - r"http:\/\/ai\.usesless\.com\/api\/cms\/auth\/email-confirmation\?confirmation=\w.+\w\w", - message_text, - )[0] - if verification_url: - break - - session.get(verification_url) - login_json = {"identifier": mail_address, "password": password} - login_request = session.post( - url="https://ai.usesless.com/api/cms/auth/local", json=login_json - ) - - token = login_request.json()["jwt"] - if logging and token: - print(f"Token: {token}") - - with open("account.json", "w") as file: - json.dump({"email": mail_address, "token": token}, file) - if logging: - print( - "\nNew account credentials has been successfully saved in 'account.json' file" - ) - - return token - - -class Completion: - @staticmethod - def create( - token: str, - systemMessage: str = "You are a helpful assistant", - prompt: str = "", - parentMessageId: str = "", - presence_penalty: float = 1, - temperature: float = 1, - model: str = "gpt-3.5-turbo", - ): - headers = { - "authority": "ai.usesless.com", - "accept": "application/json, text/plain, */*", - "accept-language": "en-US,en;q=0.5", - "cache-control": "no-cache", - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": fake_useragent.UserAgent().random, - "Authorization": f"Bearer {token}", - } - - json_data = { - "openaiKey": "", - "prompt": prompt, - "options": { - "parentMessageId": parentMessageId, - "systemMessage": systemMessage, - "completionParams": { - "presence_penalty": presence_penalty, - "temperature": temperature, - "model": model, - }, - }, - } - - url = "https://ai.usesless.com/api/chat-process" - request = requests.post(url, headers=headers, json=json_data) - request.encoding = request.apparent_encoding - content = request.content - - response = Completion.__response_to_json(content) - return response - - - @classmethod - def __response_to_json(cls, text) -> str: - text = str(text.decode("utf-8")) - - split_text = text.rsplit("\n", 1) - if len(split_text) > 1: - to_json = json.loads(split_text[1]) - return to_json - else: - return None - diff --git a/spaces/jytole/hftesting/README.md b/spaces/jytole/hftesting/README.md deleted file mode 100644 index ff80aead3e36c02b330c89e789e54fc1ff17319b..0000000000000000000000000000000000000000 --- a/spaces/jytole/hftesting/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Hftesting -emoji: 🐨 -colorFrom: blue -colorTo: pink -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: cc -python_version: 3.9.13 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kadirnar/classifyhub/README.md b/spaces/kadirnar/classifyhub/README.md deleted file mode 100644 index f383aa918ae0ffb44a020f6b00322b94108f5476..0000000000000000000000000000000000000000 --- a/spaces/kadirnar/classifyhub/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Classifyhub -emoji: 🌍 -colorFrom: green -colorTo: gray -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: gpl-3.0 -tags: -- making-demos ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/VoiceChanger/src/face3d/util/__init__.py b/spaces/kevinwang676/VoiceChanger/src/face3d/util/__init__.py deleted file mode 100644 index 04eecb58b62f8c9d11d17606c6241d278a48b9b9..0000000000000000000000000000000000000000 --- 
a/spaces/kevinwang676/VoiceChanger/src/face3d/util/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -"""This package includes a miscellaneous collection of useful helper functions.""" -from src.face3d.util import * - diff --git a/spaces/kosurisiva/MyGenAiChatBot/README.md b/spaces/kosurisiva/MyGenAiChatBot/README.md deleted file mode 100644 index 1e4a67438dd921cb10346fd946f0073c3469de48..0000000000000000000000000000000000000000 --- a/spaces/kosurisiva/MyGenAiChatBot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MyGenAiChatBot -emoji: 📊 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/dim/wrap_type.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/dim/wrap_type.py deleted file mode 100644 index 8212836d3d6ae71f84dcb161b60bd513fe35b571..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/dim/wrap_type.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# All rights reserved. -# -# This source code is licensed under the BSD-style license found in the -# LICENSE file in the root directory of this source tree. - -from types import FunctionType, BuiltinMethodType, MethodDescriptorType, WrapperDescriptorType, GetSetDescriptorType -from functorch._C import dim as _C -_wrap_method = _C._wrap_method - -FUNC_TYPES = (FunctionType, MethodDescriptorType, BuiltinMethodType, WrapperDescriptorType) -PROPERTY_TYPES = (GetSetDescriptorType, property) - -def _py_wrap_method(orig, __torch_function__): - def impl(*args, **kwargs): - return __torch_function__(orig, None, args, kwargs) - return impl - - - -def wrap_type(use_c, to_patch, pattern, __torch_function__): - - if use_c: - wrap_method = _wrap_method - else: - wrap_method = _py_wrap_method - - all = {} - for t in reversed(pattern.mro()[:-1]): # skip object - all.update(t.__dict__) - - - def wrap_attr(orig): - return property(wrap_method(orig.__get__, __torch_function__)) - - - for name, obj in all.items(): - if name in ('__dict__', '__new__', '__init__', '__repr__', '__weakref__', '__doc__', '__module__', '__dir__'): - continue - - # skip things that have been overloaded - # things that come from object like `__eq__` still need to be patched, however. - if hasattr(to_patch, name) and getattr(to_patch, name) is not getattr(object, name, None): - continue - - if isinstance(obj, FUNC_TYPES): - setattr(to_patch, name, wrap_method(obj, __torch_function__)) - elif isinstance(obj, PROPERTY_TYPES): - setattr(to_patch, name, wrap_attr(obj)) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/_webhooks_server.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/_webhooks_server.py deleted file mode 100644 index 7cc5dd4ce7769fee10e0198cffe79f64a33b211d..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/_webhooks_server.py +++ /dev/null @@ -1,369 +0,0 @@ -# coding=utf-8 -# Copyright 2023-present, the HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Contains `WebhooksServer` and `webhook_endpoint` to create a webhook server easily.""" -import atexit -import inspect -import os -from functools import wraps -from typing import TYPE_CHECKING, Callable, Dict, Optional - -from .utils import experimental, is_gradio_available - - -if TYPE_CHECKING: - import gradio as gr - - -from fastapi import FastAPI, Request -from fastapi.responses import JSONResponse - - -_global_app: Optional["WebhooksServer"] = None -_is_local = os.getenv("SYSTEM") != "spaces" - - -@experimental -class WebhooksServer: - """ - The [`WebhooksServer`] class lets you create an instance of a Gradio app that can receive Huggingface webhooks. - These webhooks can be registered using the [`~WebhooksServer.add_webhook`] decorator. Webhook endpoints are added to - the app as a POST endpoint to the FastAPI router. Once all the webhooks are registered, the `run` method has to be - called to start the app. - - It is recommended to accept [`WebhookPayload`] as the first argument of the webhook function. It is a Pydantic - model that contains all the information about the webhook event. The data will be parsed automatically for you. - - Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to setup your - WebhooksServer and deploy it on a Space. - - - - `WebhooksServer` is experimental. Its API is subject to change in the future. - - - - - - You must have `gradio` installed to use `WebhooksServer` (`pip install --upgrade gradio`). - - - - Args: - ui (`gradio.Blocks`, optional): - A Gradio UI instance to be used as the Space landing page. If `None`, a UI displaying instructions - about the configured webhooks is created. - webhook_secret (`str`, optional): - A secret key to verify incoming webhook requests. You can set this value to any secret you want as long as - you also configure it in your [webhooks settings panel](https://huggingface.co/settings/webhooks). You - can also set this value as the `WEBHOOK_SECRET` environment variable. If no secret is provided, the - webhook endpoints are opened without any security. - - Example: - - ```python - import gradio as gr - from huggingface_hub import WebhooksServer, WebhookPayload - - with gr.Blocks() as ui: - ... - - app = WebhooksServer(ui=ui, webhook_secret="my_secret_key") - - @app.add_webhook("/say_hello") - async def hello(payload: WebhookPayload): - return {"message": "hello"} - - app.run() - ``` - """ - - def __new__(cls, *args, **kwargs) -> "WebhooksServer": - if not is_gradio_available(): - raise ImportError( - "You must have `gradio` installed to use `WebhooksServer`. Please run `pip install --upgrade gradio`" - " first." 
- ) - return super().__new__(cls) - - def __init__( - self, - ui: Optional["gr.Blocks"] = None, - webhook_secret: Optional[str] = None, - ) -> None: - self._ui = ui - - self.webhook_secret = webhook_secret or os.getenv("WEBHOOK_SECRET") - self.registered_webhooks: Dict[str, Callable] = {} - _warn_on_empty_secret(self.webhook_secret) - - def add_webhook(self, path: Optional[str] = None) -> Callable: - """ - Decorator to add a webhook to the [`WebhooksServer`] server. - - Args: - path (`str`, optional): - The URL path to register the webhook function. If not provided, the function name will be used as the - path. In any case, all webhooks are registered under `/webhooks`. - - Raises: - ValueError: If the provided path is already registered as a webhook. - - Example: - ```python - from huggingface_hub import WebhooksServer, WebhookPayload - - app = WebhooksServer() - - @app.add_webhook - async def trigger_training(payload: WebhookPayload): - if payload.repo.type == "dataset" and payload.event.action == "update": - # Trigger a training job if a dataset is updated - ... - - app.run() - ``` - """ - # Usage: directly as decorator. Example: `@app.add_webhook` - if callable(path): - # If path is a function, it means it was used as a decorator without arguments - return self.add_webhook()(path) - - # Usage: provide a path. Example: `@app.add_webhook(...)` - @wraps(FastAPI.post) - def _inner_post(*args, **kwargs): - func = args[0] - abs_path = f"/webhooks/{(path or func.__name__).strip('/')}" - if abs_path in self.registered_webhooks: - raise ValueError(f"Webhook {abs_path} already exists.") - self.registered_webhooks[abs_path] = func - - return _inner_post - - def run(self) -> None: - """Starts the Gradio app with the FastAPI server and registers the webhooks.""" - ui = self._ui or self._get_default_ui() - - # Start Gradio App - # - as non-blocking so that webhooks can be added afterwards - # - as shared if launch locally (to debug webhooks) - self.fastapi_app, _, _ = ui.launch(prevent_thread_lock=True, share=_is_local) - - # Register webhooks to FastAPI app - for path, func in self.registered_webhooks.items(): - # Add secret check if required - if self.webhook_secret is not None: - func = _wrap_webhook_to_check_secret(func, webhook_secret=self.webhook_secret) - - # Add route to FastAPI app - self.fastapi_app.post(path)(func) - - # Print instructions and block main thread - url = (ui.share_url or ui.local_url).strip("/") - message = "\nWebhooks are correctly setup and ready to use:" - message += "\n" + "\n".join(f" - POST {url}{webhook}" for webhook in self.registered_webhooks) - message += "\nGo to https://huggingface.co/settings/webhooks to setup your webhooks." - print(message) - - ui.block_thread() - - def _get_default_ui(self) -> "gr.Blocks": - """Default UI if not provided (lists webhooks and provides basic instructions).""" - import gradio as gr - - with gr.Blocks() as ui: - gr.Markdown("# This is an app to process 🤗 Webhooks") - gr.Markdown( - "Webhooks are a foundation for MLOps-related features. They allow you to listen for new changes on" - " specific repos or to all repos belonging to particular set of users/organizations (not just your" - " repos, but any repo). Check out this [guide](https://huggingface.co/docs/hub/webhooks) to get to" - " know more about webhooks on the Huggingface Hub." 
- ) - gr.Markdown( - f"{len(self.registered_webhooks)} webhook(s) are registered:" - + "\n\n" - + "\n ".join( - f"- [{webhook_path}]({_get_webhook_doc_url(webhook.__name__, webhook_path)})" - for webhook_path, webhook in self.registered_webhooks.items() - ) - ) - gr.Markdown( - "Go to https://huggingface.co/settings/webhooks to setup your webhooks." - + "\nYou app is running locally. Please look at the logs to check the full URL you need to set." - if _is_local - else ( - "\nThis app is running on a Space. You can find the corresponding URL in the options menu" - " (top-right) > 'Embed the Space'. The URL looks like 'https://{username}-{repo_name}.hf.space'." - ) - ) - return ui - - -@experimental -def webhook_endpoint(path: Optional[str] = None) -> Callable: - """Decorator to start a [`WebhooksServer`] and register the decorated function as a webhook endpoint. - - This is a helper to get started quickly. If you need more flexibility (custom landing page or webhook secret), - you can use [`WebhooksServer`] directly. You can register multiple webhook endpoints (to the same server) by using - this decorator multiple times. - - Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to setup your - server and deploy it on a Space. - - - - `webhook_endpoint` is experimental. Its API is subject to change in the future. - - - - - - You must have `gradio` installed to use `webhook_endpoint` (`pip install --upgrade gradio`). - - - - Args: - path (`str`, optional): - The URL path to register the webhook function. If not provided, the function name will be used as the path. - In any case, all webhooks are registered under `/webhooks`. - - Examples: - The default usage is to register a function as a webhook endpoint. The function name will be used as the path. - The server will be started automatically at exit (i.e. at the end of the script). - - ```python - from huggingface_hub import webhook_endpoint, WebhookPayload - - @webhook_endpoint - async def trigger_training(payload: WebhookPayload): - if payload.repo.type == "dataset" and payload.event.action == "update": - # Trigger a training job if a dataset is updated - ... - - # Server is automatically started at the end of the script. - ``` - - Advanced usage: register a function as a webhook endpoint and start the server manually. This is useful if you - are running it in a notebook. - - ```python - from huggingface_hub import webhook_endpoint, WebhookPayload - - @webhook_endpoint - async def trigger_training(payload: WebhookPayload): - if payload.repo.type == "dataset" and payload.event.action == "update": - # Trigger a training job if a dataset is updated - ... 
- - # Start the server manually - trigger_training.run() - ``` - """ - if callable(path): - # If path is a function, it means it was used as a decorator without arguments - return webhook_endpoint()(path) - - @wraps(WebhooksServer.add_webhook) - def _inner(func: Callable) -> Callable: - app = _get_global_app() - app.add_webhook(path)(func) - if len(app.registered_webhooks) == 1: - # Register `app.run` to run at exit (only once) - atexit.register(app.run) - - @wraps(app.run) - def _run_now(): - # Run the app directly (without waiting atexit) - atexit.unregister(app.run) - app.run() - - func.run = _run_now # type: ignore - return func - - return _inner - - -def _get_global_app() -> WebhooksServer: - global _global_app - if _global_app is None: - _global_app = WebhooksServer() - return _global_app - - -def _warn_on_empty_secret(webhook_secret: Optional[str]) -> None: - if webhook_secret is None: - print("Webhook secret is not defined. This means your webhook endpoints will be open to everyone.") - print( - "To add a secret, set `WEBHOOK_SECRET` as environment variable or pass it at initialization: " - "\n\t`app = WebhooksServer(webhook_secret='my_secret', ...)`" - ) - print( - "For more details about webhook secrets, please refer to" - " https://huggingface.co/docs/hub/webhooks#webhook-secret." - ) - else: - print("Webhook secret is correctly defined.") - - -def _get_webhook_doc_url(webhook_name: str, webhook_path: str) -> str: - """Returns the anchor to a given webhook in the docs (experimental)""" - return "/docs#/default/" + webhook_name + webhook_path.replace("/", "_") + "_post" - - -def _wrap_webhook_to_check_secret(func: Callable, webhook_secret: str) -> Callable: - """Wraps a webhook function to check the webhook secret before calling the function. - - This is a hacky way to add the `request` parameter to the function signature. Since FastAPI based itself on route - parameters to inject the values to the function, we need to hack the function signature to retrieve the `Request` - object (and hence the headers). A far cleaner solution would be to use a middleware. However, since - `fastapi==0.90.1`, a middleware cannot be added once the app has started. And since the FastAPI app is started by - Gradio internals (and not by us), we cannot add a middleware. - - This method is called only when a secret has been defined by the user. If a request is sent without the - "x-webhook-secret", the function will return a 401 error (unauthorized). If the header is sent but is incorrect, - the function will return a 403 error (forbidden). - - Inspired by https://stackoverflow.com/a/33112180. 
- """ - initial_sig = inspect.signature(func) - - @wraps(func) - async def _protected_func(request: Request, **kwargs): - request_secret = request.headers.get("x-webhook-secret") - if request_secret is None: - return JSONResponse({"error": "x-webhook-secret header not set."}, status_code=401) - if request_secret != webhook_secret: - return JSONResponse({"error": "Invalid webhook secret."}, status_code=403) - - # Inject `request` in kwargs if required - if "request" in initial_sig.parameters: - kwargs["request"] = request - - # Handle both sync and async routes - if inspect.iscoroutinefunction(func): - return await func(**kwargs) - else: - return func(**kwargs) - - # Update signature to include request - if "request" not in initial_sig.parameters: - _protected_func.__signature__ = initial_sig.replace( # type: ignore - parameters=( - inspect.Parameter(name="request", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request), - ) - + tuple(initial_sig.parameters.values()) - ) - - # Return protected route - return _protected_func diff --git a/spaces/leave7/kazunaAI2.0/add_speaker.py b/spaces/leave7/kazunaAI2.0/add_speaker.py deleted file mode 100644 index e224f07c892a5fe1837e3cbf1745e0d8992ea283..0000000000000000000000000000000000000000 --- a/spaces/leave7/kazunaAI2.0/add_speaker.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import argparse -from tqdm import tqdm -from random import shuffle -import json - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list") - parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list") - parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list") - parser.add_argument("--source_dir", type=str, default="./dataset/32k", help="path to source dir") - args = parser.parse_args() - - previous_config = json.load(open("configs/config.json", "rb")) - - train = [] - val = [] - test = [] - idx = 0 - spk_dict = previous_config["spk"] - spk_id = max([i for i in spk_dict.values()]) + 1 - for speaker in tqdm(os.listdir(args.source_dir)): - if speaker not in spk_dict.keys(): - spk_dict[speaker] = spk_id - spk_id += 1 - wavs = [os.path.join(args.source_dir, speaker, i)for i in os.listdir(os.path.join(args.source_dir, speaker))] - wavs = [i for i in wavs if i.endswith("wav")] - shuffle(wavs) - train += wavs[2:-10] - val += wavs[:2] - test += wavs[-10:] - - assert previous_config["model"]["n_speakers"] > len(spk_dict.keys()) - shuffle(train) - shuffle(val) - shuffle(test) - - print("Writing", args.train_list) - with open(args.train_list, "w") as f: - for fname in tqdm(train): - wavpath = fname - f.write(wavpath + "\n") - - print("Writing", args.val_list) - with open(args.val_list, "w") as f: - for fname in tqdm(val): - wavpath = fname - f.write(wavpath + "\n") - - print("Writing", args.test_list) - with open(args.test_list, "w") as f: - for fname in tqdm(test): - wavpath = fname - f.write(wavpath + "\n") - - previous_config["spk"] = spk_dict - - print("Writing configs/config.json") - with open("configs/config.json", "w") as f: - json.dump(previous_config, f, indent=2) diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Free Xats Generator V1.3 Download [TOP].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Free Xats Generator V1.3 Download [TOP].md deleted file mode 100644 index f75d3cccbd175b07b07f729d8e020ec96fbc00be..0000000000000000000000000000000000000000 --- 
a/spaces/lincquiQcaudo/Top-20-Diffusion/Free Xats Generator V1.3 Download [TOP].md +++ /dev/null @@ -1,6 +0,0 @@
-Free xats generator v1.3 download
-Download File: https://bytlly.com/2uGyAT
-Esteem 8 Software Crack Sites > https://bytlly.com/1sitnf · Free xats generator v1.3 download · Knock Out hindi movie torrent. Bein sport 1 canl izle hd pulive ... 1fdad05405
      diff --git a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py b/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py deleted file mode 100644 index 0c02eaf70fc0140aca7925f621c29a496f491cae..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py +++ /dev/null @@ -1,16 +0,0 @@ -import importlib -import os.path as osp - - -def get_config(config_file): - assert config_file.startswith('configs/'), 'config file setting must start with configs/' - temp_config_name = osp.basename(config_file) - temp_module_name = osp.splitext(temp_config_name)[0] - config = importlib.import_module("configs.base") - cfg = config.config - config = importlib.import_module("configs.%s" % temp_module_name) - job_cfg = config.config - cfg.update(job_cfg) - if cfg.output is None: - cfg.output = osp.join('work_dirs', temp_module_name) - return cfg \ No newline at end of file diff --git a/spaces/ljjggr/bingo/src/lib/bots/bing/types.ts b/spaces/ljjggr/bingo/src/lib/bots/bing/types.ts deleted file mode 100644 index 02cd5e8b01e3529642d28dc1539bf958f4ac420b..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/src/lib/bots/bing/types.ts +++ /dev/null @@ -1,259 +0,0 @@ -export type Author = 'user' | 'system' | 'bot' - -export type BotId = 'bing' - -export enum BingConversationStyle { - Creative = 'Creative', - Balanced = 'Balanced', - Precise = 'Precise' -} - -export enum ErrorCode { - CONVERSATION_LIMIT = 'CONVERSATION_LIMIT', - BING_UNAUTHORIZED = 'BING_UNAUTHORIZED', - BING_FORBIDDEN = 'BING_FORBIDDEN', - BING_CAPTCHA = 'BING_CAPTCHA', - THROTTLE_LIMIT = 'THROTTLE_LIMIT', - NOTFOUND_ERROR = 'NOT_FOUND_ERROR', - UNKOWN_ERROR = 'UNKOWN_ERROR', - NETWORK_ERROR = 'NETWORK_ERROR', -} - -export class ChatError extends Error { - code: ErrorCode - constructor(message: string, code: ErrorCode) { - super(message) - this.code = code - } -} - -export type ChatMessageModel = { - id: string - author: Author - text: string - error?: ChatError - throttling?: Throttling - sourceAttributions?: SourceAttribution[] - suggestedResponses?: SuggestedResponse[] -} - -export interface ConversationModel { - messages: ChatMessageModel[] -} - -export type Event = - | { - type: 'UPDATE_ANSWER' - data: { - text: string - spokenText?: string - sourceAttributions?: SourceAttribution[] - suggestedResponses?: SuggestedResponse[] - throttling?: Throttling - } - } - | { - type: 'DONE' - } - | { - type: 'ERROR' - error: ChatError - } - -export interface SendMessageParams { - prompt: string - imageUrl?: string - options: T - onEvent: (event: Event) => void - signal?: AbortSignal -} - -export interface ConversationResponse { - conversationId: string - clientId: string - conversationSignature: string - result: { - value: string - message?: string - } -} - -export interface Telemetry { - metrics?: null - startTime: string -} - -export interface ChatUpdateArgument { - messages?: ChatResponseMessage[] - throttling?: Throttling - requestId: string - result: null -} - -export type ChatUpdateCompleteResponse = { - type: 2 - invocationId: string - item: ChatResponseItem -} | { - type: 1 - target: string - arguments: ChatUpdateArgument[] -} | { - type: 3 - invocationId: string -} | { - type: 6 | 7 -} - -export interface ChatRequestResult { - value: string - serviceVersion: string - error?: string -} - -export interface ChatResponseItem { - messages: ChatResponseMessage[] - firstNewMessageIndex: number - 
suggestedResponses: null - conversationId: string - requestId: string - conversationExpiryTime: string - telemetry: Telemetry - result: ChatRequestResult - throttling: Throttling -} -export enum InvocationEventType { - Invocation = 1, - StreamItem = 2, - Completion = 3, - StreamInvocation = 4, - CancelInvocation = 5, - Ping = 6, - Close = 7, -} - -// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts - -export interface ConversationInfo { - conversationId: string - clientId: string - conversationSignature: string - invocationId: number - conversationStyle: BingConversationStyle - prompt: string - imageUrl?: string -} - -export interface BingChatResponse { - conversationSignature: string - conversationId: string - clientId: string - invocationId: number - conversationExpiryTime: Date - response: string - details: ChatResponseMessage -} - -export interface Throttling { - maxNumLongDocSummaryUserMessagesInConversation: number - maxNumUserMessagesInConversation: number - numLongDocSummaryUserMessagesInConversation: number - numUserMessagesInConversation: number -} - -export interface ChatResponseMessage { - text: string - spokenText?: string - author: string - createdAt: Date - timestamp: Date - messageId: string - requestId: string - offense: string - adaptiveCards: AdaptiveCard[] - sourceAttributions: SourceAttribution[] - feedback: Feedback - contentOrigin: string - messageType?: string - contentType?: string - privacy: null - suggestedResponses: SuggestedResponse[] -} - -export interface AdaptiveCard { - type: string - version: string - body: Body[] -} - -export interface Body { - type: string - text: string - wrap: boolean - size?: string -} - -export interface Feedback { - tag: null - updatedOn: null - type: string -} - -export interface SourceAttribution { - providerDisplayName: string - seeMoreUrl: string - searchQuery: string -} - -export interface SuggestedResponse { - text: string - author?: Author - createdAt?: Date - timestamp?: Date - messageId?: string - messageType?: string - offense?: string - feedback?: Feedback - contentOrigin?: string - privacy?: null -} - -export interface KBlobRequest { - knowledgeRequest: KnowledgeRequestContext - imageBase64?: string -} - -export interface KBlobResponse { - blobId: string - processedBlobId?: string -} - -export interface KnowledgeRequestContext { - imageInfo: ImageInfo; - knowledgeRequest: KnowledgeRequest; -} - -export interface ImageInfo { - url?: string; -} - -export interface KnowledgeRequest { - invokedSkills: string[]; - subscriptionId: string; - invokedSkillsRequestData: InvokedSkillsRequestData; - convoData: ConvoData; -} - -export interface ConvoData { - convoid: string; - convotone: BingConversationStyle; -} - -export interface InvokedSkillsRequestData { - enableFaceBlur: boolean; -} - -export interface FileItem { - url: string; - status?: 'loading' | 'error' | 'loaded' -} diff --git a/spaces/ltgoslo/ssa-perin/mtool/score/sdp.py b/spaces/ltgoslo/ssa-perin/mtool/score/sdp.py deleted file mode 100644 index ec0212a21d6869eb6385031c9f808d14bda40463..0000000000000000000000000000000000000000 --- a/spaces/ltgoslo/ssa-perin/mtool/score/sdp.py +++ /dev/null @@ -1,147 +0,0 @@ -# Marco Kuhlmann - -import sys - -from score.core import anchor, intersect; - -class Measure(object): - - def __init__(self, get_items): - self.get_items = get_items - self.g = 0 - self.s = 0 - self.c = 0 - self.n_updates = 0 - self.n_matches = 0 - - def update(self, gold, system, gidentities, sidentities, trace = 0): - g_items = 
set(self.get_items(gold, gidentities)) - s_items = set(self.get_items(system, sidentities)) - self.g += len(g_items) - self.s += len(s_items) - self.c += len(g_items & s_items) - self.n_updates += 1 - self.n_matches += g_items == s_items - if trace: - return {"g": len(g_items), "s": len(s_items), - "c": len(g_items & s_items), "m": 1 if g_items == s_items else 0}; - - def p(self): - return self.c / self.s if self.s != 0 else 0.0 - - def r(self): - return self.c / self.g if self.g != 0 else 0.0 - - def f(self): - p = self.p() - r = self.r() - return 2 * p * r / (p + r) if p + r != 0 else 0.0 - - def m(self): - return self.n_matches / self.n_updates if self.n_updates != 0 else 0.0 - - def report(self): - json = {} - json["g"] = self.g - json["s"] = self.s - json["c"] = self.c - json["p"] = self.p() - json["r"] = self.r() - json["f"] = self.f() - json["m"] = self.m() - return json - -# def argument_predicate_dm(label): -# return True - -# def argument_predicate_pas(label): -# arguments = set("adj_ARG1 adj_ARG2 adj_MOD coord_ARG1 coord_ARG2 prep_ARG1 prep_ARG2 prep_ARG3 prep_MOD verb_ARG1 verb_ARG2 verb_ARG3 verb_ARG4 verb_MOD".split()) -# return label in arguments - -# def argument_predicate_psd(label): -# return label.endswith("-arg") - -class Scorer(object): - - def __init__(self, include_virtual=True): - self.measures = [] - self.measures.append(("labeled", Measure(self.get_itemsL))) - self.measures.append(("unlabeled", Measure(self.get_itemsU))) - # self.measureP = Measure(self.get_itemsP) - # self.measureF = Measure(self.get_itemsF) - # self.measureS = Measure(self.get_itemsS) - self.include_virtual = include_virtual - - def identify(self, id): - return self.identities[id] - - def get_itemsL(self, graph, identities): - result = {(identities[e.src], identities[e.tgt], e.lab) for e in graph.edges} - if self.include_virtual: - for node in graph.nodes: - if node.is_top: - result.add((-1, identities[node.id], None)) - return result - - def get_itemsU(self, graph, identities): - result = {(identities[e.src], identities[e.tgt]) for e in graph.edges} - if self.include_virtual: - for node in graph.nodes: - if node.is_top: - result.add((-1, identities[node.id])) - return result - - # def get_itemsP(self, graph): - # return {(frame[0], frame[2]) for frame in self.get_itemsF(graph)} - - # def get_itemsF(self, graph): - # result = set() - # for node in graph.nodes: - # if self.has_scorable_predicate(node): - # arguments = set() - # for edge in node.outgoing_edges: - # if self.argument_predicate(edge.lab): - # arguments.add(edge) - # extract = (node.id, node.sense, tuple(sorted(arguments))) - # result.add(extract) - # return result - - # def get_itemsS(self, graph): - # return {(frame[0], frame[1]) for frame in self.get_itemsF(graph)} - - # def argument_predicate(self, label): - # return True - - # def has_scorable_predicate(self, node): - # return node.pred and node.pos.startswith("V") - - # def show_predications(self, g): - # print(g.id) - # report_predications(self.complete_predications(g)) - - def update(self, g, s, trace): - gidentities = {node.id: tuple(anchor(node)) for node in g.nodes} - sidentities = {node.id: tuple(anchor(node)) for node in s.nodes} - scores = dict(); - for key, measure in self.measures: - score = measure.update(g, s, gidentities, sidentities, trace) - if trace: scores[key] = score; - return scores; - - def report(self, n, scores = None): - json = {"n": n} - for info, measure in self.measures: - json[info] = measure.report() - if scores is not None: json["scores"] = scores - 
return json - -def evaluate(gold, system, format = "json", trace = 0): - scorer = Scorer(include_virtual=True) - n = 0 - scores = dict() if trace else None - for g, s in intersect(gold, system): - score = scorer.update(g, s, trace) - n += 1 - if trace: scores[g.id] = score - result = scorer.report(n, scores) - return result diff --git a/spaces/luisoala/raw2logit/utils/augmentation.py b/spaces/luisoala/raw2logit/utils/augmentation.py deleted file mode 100644 index 6bf4cd14a646e6d52b0bb6214f591954ccb8c336..0000000000000000000000000000000000000000 --- a/spaces/luisoala/raw2logit/utils/augmentation.py +++ /dev/null @@ -1,132 +0,0 @@ -import random -import numpy as np - -import torch -import torchvision.transforms as T - - -class RandomRotate90(): # Note: not the same as T.RandomRotation(90) - def __call__(self, x): - x = x.rot90(random.randint(0, 3), dims=(-1, -2)) - return x - - def __repr__(self): - return self.__class__.__name__ - - -class AddGaussianNoise(): - def __init__(self, std=0.01): - self.std = std - - def __call__(self, x): - # noise = torch.randn_like(x) * self.std - # out = x + noise - # debug(x) - # debug(noise) - # debug(out) - return x + torch.randn_like(x) * self.std - - def __repr__(self): - return self.__class__.__name__ + f'(std={self.std})' - - -def set_global_seed(seed): - torch.random.manual_seed(seed) - np.random.seed(seed % (2**32 - 1)) - random.seed(seed) - - -class ComposeState(T.Compose): - def __init__(self, transforms): - self.transforms = [] - self.mask_transforms = [] - - for t in transforms: - apply_for_mask = True - if isinstance(t, tuple): - t, apply_for_mask = t - self.transforms.append(t) - if apply_for_mask: - self.mask_transforms.append(t) - - self.seed = None - - # @debug - def __call__(self, x, retain_state=False, mask_transform=False): - if self.seed is not None: # retain previous state - set_global_seed(self.seed) - if retain_state: # save state for next call - self.seed = self.seed or torch.seed() - set_global_seed(self.seed) - else: - self.seed = None # reset / ignore state - - transforms = self.transforms if not mask_transform else self.mask_transforms - for t in transforms: - x = t(x) - return x - - -augmentation_weak = ComposeState([ - T.RandomHorizontalFlip(), - T.RandomVerticalFlip(), - RandomRotate90(), -]) - - -augmentation_strong = ComposeState([ - T.RandomHorizontalFlip(p=0.5), - T.RandomVerticalFlip(p=0.5), - T.RandomApply([T.RandomRotation(90)], p=0.5), - # (transform, apply_to_mask=True) - (T.RandomApply([AddGaussianNoise(std=0.0005)], p=0.5), False), - (T.RandomAdjustSharpness(0.5, p=0.5), False), -]) - - -def get_augmentation(type): - if type == 'none': - return None - if type == 'weak': - return augmentation_weak - if type == 'strong': - return augmentation_strong - - -if __name__ == '__main__': - import os - if not os.path.exists('README.md'): - os.chdir('..') - - # from utils.debug import debug - from dataset import get_dataset - import matplotlib.pyplot as plt - - dataset = get_dataset('DS') # drone segmentation - img, mask = dataset[10] - mask = (mask + 0.2) / 1.2 - - plt.figure(figsize=(14, 8)) - plt.subplot(121) - plt.imshow(img) - plt.subplot(122) - plt.imshow(mask) - plt.suptitle('no augmentation') - plt.show() - - from utils.base import np2torch, torch2np - img, mask = np2torch(img), np2torch(mask) - - # from utils.augmentation import get_augmentation - augmentation = get_augmentation('strong') - - set_global_seed(1) - - for i in range(1, 4): - plt.figure(figsize=(14, 8)) - plt.subplot(121) - 
plt.imshow(torch2np(augmentation(img.unsqueeze(0), retain_state=True)).squeeze()) - plt.subplot(122) - plt.imshow(torch2np(augmentation(mask.unsqueeze(0), mask_transform=True)).squeeze()) - plt.suptitle(f'augmentation test {i}') - plt.show() diff --git a/spaces/lunarring/latentblending/ldm/models/diffusion/dpm_solver/__init__.py b/spaces/lunarring/latentblending/ldm/models/diffusion/dpm_solver/__init__.py deleted file mode 100644 index 7427f38c07530afbab79154ea8aaf88c4bf70a08..0000000000000000000000000000000000000000 --- a/spaces/lunarring/latentblending/ldm/models/diffusion/dpm_solver/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_sequences_and_iterators.cpp b/spaces/ma-xu/LIVE/pybind11/tests/test_sequences_and_iterators.cpp deleted file mode 100644 index 1ce0451092b2050b4b5ef762c6a5e755237844d8..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/pybind11/tests/test_sequences_and_iterators.cpp +++ /dev/null @@ -1,358 +0,0 @@ -/* - tests/test_sequences_and_iterators.cpp -- supporting Pythons' sequence protocol, iterators, - etc. - - Copyright (c) 2016 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. -*/ - -#include "pybind11_tests.h" -#include "constructor_stats.h" -#include -#include - -#include - -template -class NonZeroIterator { - const T* ptr_; -public: - NonZeroIterator(const T* ptr) : ptr_(ptr) {} - const T& operator*() const { return *ptr_; } - NonZeroIterator& operator++() { ++ptr_; return *this; } -}; - -class NonZeroSentinel {}; - -template -bool operator==(const NonZeroIterator>& it, const NonZeroSentinel&) { - return !(*it).first || !(*it).second; -} - -template -py::list test_random_access_iterator(PythonType x) { - if (x.size() < 5) - throw py::value_error("Please provide at least 5 elements for testing."); - - auto checks = py::list(); - auto assert_equal = [&checks](py::handle a, py::handle b) { - auto result = PyObject_RichCompareBool(a.ptr(), b.ptr(), Py_EQ); - if (result == -1) { throw py::error_already_set(); } - checks.append(result != 0); - }; - - auto it = x.begin(); - assert_equal(x[0], *it); - assert_equal(x[0], it[0]); - assert_equal(x[1], it[1]); - - assert_equal(x[1], *(++it)); - assert_equal(x[1], *(it++)); - assert_equal(x[2], *it); - assert_equal(x[3], *(it += 1)); - assert_equal(x[2], *(--it)); - assert_equal(x[2], *(it--)); - assert_equal(x[1], *it); - assert_equal(x[0], *(it -= 1)); - - assert_equal(it->attr("real"), x[0].attr("real")); - assert_equal((it + 1)->attr("real"), x[1].attr("real")); - - assert_equal(x[1], *(it + 1)); - assert_equal(x[1], *(1 + it)); - it += 3; - assert_equal(x[1], *(it - 2)); - - checks.append(static_cast(x.end() - x.begin()) == x.size()); - checks.append((x.begin() + static_cast(x.size())) == x.end()); - checks.append(x.begin() < x.end()); - - return checks; -} - -TEST_SUBMODULE(sequences_and_iterators, m) { - // test_sliceable - class Sliceable{ - public: - Sliceable(int n): size(n) {} - int start,stop,step; - int size; - }; - py::class_(m,"Sliceable") - .def(py::init()) - .def("__getitem__",[](const Sliceable &s, py::slice slice) { - ssize_t start, stop, step, slicelength; - if (!slice.compute(s.size, &start, &stop, &step, &slicelength)) - throw py::error_already_set(); - int istart = static_cast(start); - int istop = static_cast(stop); - int istep = static_cast(step); - return std::make_tuple(istart,istop,istep); 
- }) - ; - - // test_sequence - class Sequence { - public: - Sequence(size_t size) : m_size(size) { - print_created(this, "of size", m_size); - m_data = new float[size]; - memset(m_data, 0, sizeof(float) * size); - } - Sequence(const std::vector &value) : m_size(value.size()) { - print_created(this, "of size", m_size, "from std::vector"); - m_data = new float[m_size]; - memcpy(m_data, &value[0], sizeof(float) * m_size); - } - Sequence(const Sequence &s) : m_size(s.m_size) { - print_copy_created(this); - m_data = new float[m_size]; - memcpy(m_data, s.m_data, sizeof(float)*m_size); - } - Sequence(Sequence &&s) : m_size(s.m_size), m_data(s.m_data) { - print_move_created(this); - s.m_size = 0; - s.m_data = nullptr; - } - - ~Sequence() { print_destroyed(this); delete[] m_data; } - - Sequence &operator=(const Sequence &s) { - if (&s != this) { - delete[] m_data; - m_size = s.m_size; - m_data = new float[m_size]; - memcpy(m_data, s.m_data, sizeof(float)*m_size); - } - print_copy_assigned(this); - return *this; - } - - Sequence &operator=(Sequence &&s) { - if (&s != this) { - delete[] m_data; - m_size = s.m_size; - m_data = s.m_data; - s.m_size = 0; - s.m_data = nullptr; - } - print_move_assigned(this); - return *this; - } - - bool operator==(const Sequence &s) const { - if (m_size != s.size()) return false; - for (size_t i = 0; i < m_size; ++i) - if (m_data[i] != s[i]) - return false; - return true; - } - bool operator!=(const Sequence &s) const { return !operator==(s); } - - float operator[](size_t index) const { return m_data[index]; } - float &operator[](size_t index) { return m_data[index]; } - - bool contains(float v) const { - for (size_t i = 0; i < m_size; ++i) - if (v == m_data[i]) - return true; - return false; - } - - Sequence reversed() const { - Sequence result(m_size); - for (size_t i = 0; i < m_size; ++i) - result[m_size - i - 1] = m_data[i]; - return result; - } - - size_t size() const { return m_size; } - - const float *begin() const { return m_data; } - const float *end() const { return m_data+m_size; } - - private: - size_t m_size; - float *m_data; - }; - py::class_(m, "Sequence") - .def(py::init()) - .def(py::init&>()) - /// Bare bones interface - .def("__getitem__", [](const Sequence &s, size_t i) { - if (i >= s.size()) throw py::index_error(); - return s[i]; - }) - .def("__setitem__", [](Sequence &s, size_t i, float v) { - if (i >= s.size()) throw py::index_error(); - s[i] = v; - }) - .def("__len__", &Sequence::size) - /// Optional sequence protocol operations - .def("__iter__", [](const Sequence &s) { return py::make_iterator(s.begin(), s.end()); }, - py::keep_alive<0, 1>() /* Essential: keep object alive while iterator exists */) - .def("__contains__", [](const Sequence &s, float v) { return s.contains(v); }) - .def("__reversed__", [](const Sequence &s) -> Sequence { return s.reversed(); }) - /// Slicing protocol (optional) - .def("__getitem__", [](const Sequence &s, py::slice slice) -> Sequence* { - size_t start, stop, step, slicelength; - if (!slice.compute(s.size(), &start, &stop, &step, &slicelength)) - throw py::error_already_set(); - Sequence *seq = new Sequence(slicelength); - for (size_t i = 0; i < slicelength; ++i) { - (*seq)[i] = s[start]; start += step; - } - return seq; - }) - .def("__setitem__", [](Sequence &s, py::slice slice, const Sequence &value) { - size_t start, stop, step, slicelength; - if (!slice.compute(s.size(), &start, &stop, &step, &slicelength)) - throw py::error_already_set(); - if (slicelength != value.size()) - throw std::runtime_error("Left 
and right hand size of slice assignment have different sizes!"); - for (size_t i = 0; i < slicelength; ++i) { - s[start] = value[i]; start += step; - } - }) - /// Comparisons - .def(py::self == py::self) - .def(py::self != py::self) - // Could also define py::self + py::self for concatenation, etc. - ; - - // test_map_iterator - // Interface of a map-like object that isn't (directly) an unordered_map, but provides some basic - // map-like functionality. - class StringMap { - public: - StringMap() = default; - StringMap(std::unordered_map init) - : map(std::move(init)) {} - - void set(std::string key, std::string val) { map[key] = val; } - std::string get(std::string key) const { return map.at(key); } - size_t size() const { return map.size(); } - private: - std::unordered_map map; - public: - decltype(map.cbegin()) begin() const { return map.cbegin(); } - decltype(map.cend()) end() const { return map.cend(); } - }; - py::class_(m, "StringMap") - .def(py::init<>()) - .def(py::init>()) - .def("__getitem__", [](const StringMap &map, std::string key) { - try { return map.get(key); } - catch (const std::out_of_range&) { - throw py::key_error("key '" + key + "' does not exist"); - } - }) - .def("__setitem__", &StringMap::set) - .def("__len__", &StringMap::size) - .def("__iter__", [](const StringMap &map) { return py::make_key_iterator(map.begin(), map.end()); }, - py::keep_alive<0, 1>()) - .def("items", [](const StringMap &map) { return py::make_iterator(map.begin(), map.end()); }, - py::keep_alive<0, 1>()) - ; - - // test_generalized_iterators - class IntPairs { - public: - IntPairs(std::vector> data) : data_(std::move(data)) {} - const std::pair* begin() const { return data_.data(); } - private: - std::vector> data_; - }; - py::class_(m, "IntPairs") - .def(py::init>>()) - .def("nonzero", [](const IntPairs& s) { - return py::make_iterator(NonZeroIterator>(s.begin()), NonZeroSentinel()); - }, py::keep_alive<0, 1>()) - .def("nonzero_keys", [](const IntPairs& s) { - return py::make_key_iterator(NonZeroIterator>(s.begin()), NonZeroSentinel()); - }, py::keep_alive<0, 1>()) - ; - - -#if 0 - // Obsolete: special data structure for exposing custom iterator types to python - // kept here for illustrative purposes because there might be some use cases which - // are not covered by the much simpler py::make_iterator - - struct PySequenceIterator { - PySequenceIterator(const Sequence &seq, py::object ref) : seq(seq), ref(ref) { } - - float next() { - if (index == seq.size()) - throw py::stop_iteration(); - return seq[index++]; - } - - const Sequence &seq; - py::object ref; // keep a reference - size_t index = 0; - }; - - py::class_(seq, "Iterator") - .def("__iter__", [](PySequenceIterator &it) -> PySequenceIterator& { return it; }) - .def("__next__", &PySequenceIterator::next); - - On the actual Sequence object, the iterator would be constructed as follows: - .def("__iter__", [](py::object s) { return PySequenceIterator(s.cast(), s); }) -#endif - - // test_python_iterator_in_cpp - m.def("object_to_list", [](py::object o) { - auto l = py::list(); - for (auto item : o) { - l.append(item); - } - return l; - }); - - m.def("iterator_to_list", [](py::iterator it) { - auto l = py::list(); - while (it != py::iterator::sentinel()) { - l.append(*it); - ++it; - } - return l; - }); - - // test_sequence_length: check that Python sequences can be converted to py::sequence. 
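The `Sequence` bindings above add up to a complete Python sequence interface: indexing, length, membership, iteration, reversal, slicing and comparison. The sketch below shows roughly how that surface behaves from the Python side; the `pybind11_tests.sequences_and_iterators` module path follows the `TEST_SUBMODULE` name and the upstream pybind11 test layout, and the printed values are illustrative assumptions rather than output captured from the original repo.

```python
# Illustrative Python-side use of the Sequence bindings defined above (assumed module path).
from pybind11_tests import sequences_and_iterators as m

s = m.Sequence(5)           # py::init<size_t>() -> five zero-initialised floats
s[0] = 12.5                 # __setitem__
s[3] = 3.0
print(len(s))               # __len__       -> 5
print(12.5 in s)            # __contains__  -> True
print(list(s))              # __iter__ via py::make_iterator -> [12.5, 0.0, 0.0, 3.0, 0.0]
rev = reversed(s)           # __reversed__ returns a reversed copy, itself a Sequence
sub = s[1:4]                # slice __getitem__ -> a new Sequence of length 3
s[0:2] = m.Sequence(2)      # slice __setitem__ requires the two lengths to match
print(s == s)               # py::self == py::self -> True
```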
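`StringMap` and `IntPairs` show the two iterator helpers side by side: `py::make_iterator` yields complete items, `py::make_key_iterator` yields only the first element of each pair, and a custom iterator/sentinel pair (`NonZeroIterator`/`NonZeroSentinel` here) can cut iteration short. A rough Python-side sketch, under the same module-path assumption as above:

```python
from pybind11_tests import sequences_and_iterators as m

sm = m.StringMap({"hi": "bye", "black": "white"})
print(sm["hi"])             # __getitem__ -> "bye"; a missing key raises KeyError
print(sorted(sm))           # __iter__ uses make_key_iterator -> keys only
print(sorted(sm.items()))   # items() uses make_iterator -> (key, value) tuples

pairs = m.IntPairs([(1, 2), (3, 4), (0, 5), (7, 8)])
print(list(pairs.nonzero()))       # stops at the first pair containing a zero -> [(1, 2), (3, 4)]
print(list(pairs.nonzero_keys()))  # same early stop, first elements only -> [1, 3]
```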
- m.def("sequence_length", [](py::sequence seq) { return seq.size(); }); - - // Make sure that py::iterator works with std algorithms - m.def("count_none", [](py::object o) { - return std::count_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); }); - }); - - m.def("find_none", [](py::object o) { - auto it = std::find_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); }); - return it->is_none(); - }); - - m.def("count_nonzeros", [](py::dict d) { - return std::count_if(d.begin(), d.end(), [](std::pair p) { - return p.second.cast() != 0; - }); - }); - - m.def("tuple_iterator", &test_random_access_iterator); - m.def("list_iterator", &test_random_access_iterator); - m.def("sequence_iterator", &test_random_access_iterator); - - // test_iterator_passthrough - // #181: iterator passthrough did not compile - m.def("iterator_passthrough", [](py::iterator s) -> py::iterator { - return py::make_iterator(std::begin(s), std::end(s)); - }); - - // test_iterator_rvp - // #388: Can't make iterators via make_iterator() with different r/v policies - static std::vector list = { 1, 2, 3 }; - m.def("make_iterator_1", []() { return py::make_iterator(list); }); - m.def("make_iterator_2", []() { return py::make_iterator(list); }); -} diff --git a/spaces/magicr/BuboGPT/README.md b/spaces/magicr/BuboGPT/README.md deleted file mode 100644 index 825c07559e74a961e6d20f7fada7f59f82d4a519..0000000000000000000000000000000000000000 --- a/spaces/magicr/BuboGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: BuboGPT -emoji: 🏢 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the [paper page](https://huggingface.co/papers/2307.08581), the [project page](https://bubo-gpt.github.io/). \ No newline at end of file diff --git a/spaces/magicr/BuboGPT/match.py b/spaces/magicr/BuboGPT/match.py deleted file mode 100644 index 56a2d4fbf9db0fa8f04e19d98f0a1dcaefdd24ec..0000000000000000000000000000000000000000 --- a/spaces/magicr/BuboGPT/match.py +++ /dev/null @@ -1,184 +0,0 @@ -import os -import torch -import torch.nn as nn -import openai -import random -import contextlib - -from constants.constant import COLORS - - -@contextlib.contextmanager -def auto_proxy(): - use_proxy = "OPENAI_PROXY" in os.environ - if use_proxy: - os.environ['http_proxy'] = os.environ["OPENAI_PROXY"] - os.environ['https_proxy'] = os.environ["OPENAI_PROXY"] - - yield - - if use_proxy: - os.unsetenv('http_proxy') - os.unsetenv('https_proxy') - - -class MatchModule(nn.Module): - def __init__(self, device='cpu', model="gpt-3.5-turbo"): - super().__init__() - self.device = device - self.model = model - if "OPENAI_API_KEY" not in os.environ: - raise RuntimeError("Please specify your openai API key with the environment variable OPENAI_API_KEY") - openai.api_key = os.environ["OPENAI_API_KEY"] - self.examples = [ - ( - "['dog', 'sheepdog', 'grass', 'chase sheepdog', 'field', 'field park', 'grassy', 'corgi', 'brown dog', 'brown', 'park']" - "A brown dog running in the grassy field", - 'brown dog - brown dog\n' - 'grassy field - field' - ), - ( - "['man', 'ride', 'bicycle', 'red', 'passenger train', 'track']" - "A man riding a bicycle next to a red passenger train on the tracks.", - "man - man\n" - "bicycle - bicycle\n" - "red passenger train - passenger train\n" - "tracks - track" - ), - ( - "['horse', 'herd', 'dust', 'grassy', 'field']" - "The image shows a large herd of wild horses running across a wide, open field . 
" - "There are many horses running in different directions, with some running towards the camera " - "and others running towards the edge of the field. " - "The horses are brown and white, with some having manes and tails", - "herd - herd\n" - "wild horses - horse\n" - "field - field" - ), - ( - "['man', 'plate platter', 'sandwich', 'tablening table', 'saucer', 'coffee coffee cup', 'coffee', 'bean chip fry', 'chip fry', 'coffee cup', 'bean', 'food', 'table', 'restaurant']" - "The image shows a man sitting at a table , surrounded by a large amount of food and drinks . There is a chicken sandwich on the table, as well as a bowl of soup, potato wedges, and several fried potatoes. The man is holding a spoon, which he is expected to use to eat one of the wedges or possibly a piece of the chicken sandwich. The other items on the table, such as drinks and a bowl of soup, appear to be for those accompanying the man at the table. The scene takes place in a dining establishment , likely a restaurant , based on the presence of a spoon and food items on the table, along with a tablecloth and table setting. Additionally, the presence of several chairs and the overall setup suggest this is a formal, sit-down setting rather than a fast food or take-out restaurant. The amount of food on the table suggests that this is a hearty, satisfying meal, providing a range of flavors and textures that satisfy the palate.", - "man - man\n" - "table - table\n" - "food - food\n" - "chicken sandwich - sandwich\n" - "restaurant - restaurant\n" - "fried potatoes - chip fry\n" - "drinks - coffee" - ), - ( - "['bacon', 'silverware utensil', 'fork', 'coffee', 'table dinning table', 'plate platter', 'beverage', 'napkin', 'bread french toast pan', 'pine cone', 'coffee cup cup mug', 'fruit', 'breakfast food fruit', 'bacon', 'gravy', 'bread pancake']" - "The image presents a delicious breakfast setting on a wooden dining table. The main course is a white plate with French toast and bacon . Adding to the meal are a bottle of maple syrup and a cup of coffee , both placed next to the plate. The table is set with a fork , a knife, and a spoon, all arranged neatly around the plate. There are also a few apples scattered across the table, possibly serving as a healthy addition to the meal. Overall, the scene is inviting and warmly lit, making the breakfast look especially appetizing.", - "wooden dinning table - table dinning table\n" - "fork - fork\n" - "coffee - coffee\n" - "apples - fruit\n" - "white plate - plate platter\n" - "french toast - bread french toast pan\n" - "bacon - bacon" - ), - ( - "['woman', 'canopy', 'man', 'dog pet', 'dog', 'canopy', 'bicycle', 'person', 'leash', " - "'dog pet', 'leash', 'stall', 'person woman', 'dog pet', 'city street road', 'street scene']" - "The image captures a lively street scene with several people walking and riding bikes. " - "There are two bicycles in the picture, one located in the middle of the scene and the other towards " - "the right side. Among the people, some are walking close to the bicycles, while others are scattered" - "throughout the scene. In addition to the bicycles and people, there are four dogs in the picture, " - "adding to the liveliness of the scene. The dogs are walking around the street, mingling with the " - "pedestrians and bikers. 
The street is bustling with activity, as people, bikes, and dogs all " - "share the space and enjoy the day.", - "street scene - street scene\n" - "the street - city street road\n" - "bicycles - bicycle\n" - "four dogs - dog\n" - "people - person" - ) - ] - self.system_prompt = "You are a helpful assistant. Now I will give you a list of entities and give you a " \ - "paragraph or sentence. " \ - "you need to first extract the entity given in the text and then" \ - "find the corresponding entity having similar or identical meanings in the given list. " \ - "Find all the pairs." \ - "Are you clear? let us think step by step. " \ - "The extracted entities must come from the given text and the corresponding entity must " \ - "come from the given list. " \ - "If multiple entities can be linked to the same span of text or vice versa, " \ - "just keep one and do not merge them." \ - "Here is an example: ['dog', 'sheepdog', 'grass', 'chase sheepdog', 'field', " \ - "'field park', 'grassy', 'corgi', 'brown dog', 'brown', 'park'] " \ - "A brown dog running in the grassy field" \ - "The answer is: brown dog — brown dog \n grassy field — field" - - @torch.no_grad() - def forward(self, text, entity_state): - entity_list = list(entity_state['grounding']['local'].keys()) - message = [ - {"role": "system", "content": self.system_prompt}, - ] - for q, a in self.examples: - message.append({"role": "user", "content": q}) - message.append({"role": "system", "content": a}) - message.append({ - "role": "user", - "content": '{}{}'.format(entity_state['grounding']['local'].keys(), text) - }) - - print('==> Sending request to ChatGPT...') - with auto_proxy(): - resp = openai.ChatCompletion.create( - model=self.model, - messages=message - ) - ans = resp['choices'][0]['message']['content'] - print("===> In the matching module.") - print('==> Response from ChatGPT received: {}.'.format(ans)) - # print(resp) - items = ans.split('\n') - res = [] - match_state = {} - for i in items: - if ' - ' not in i: - continue - name, ref = i.split(' - ', maxsplit=1) - name, ref = name.lower(), ref.lower() - # NOTE: ref may not be contained in the original text, double check later. - if ref in entity_list: - color_name = entity_state['grounding']['local'][ref]['color'] - else: - print('pair {} - {} not found'.format(name, ref)) - # color_name = "grey" - continue - match_state[name] = ref - entity_idx = text.lower().find(name) - if entity_idx == -1: - entity_idx = text.lower().find(name.lower()) - ref = name - if entity_idx == -1: - continue - - res.append((name, ref, entity_idx, color_name)) - res = sorted(res, key=lambda x: x[2]) - # TODO: Bug to fix - highlight_output = [] - prev = 0 - color_map = {} - - for i, r in enumerate(res): - if r[2] < prev: - continue - # to avoid one-vs-many alignments - if r[2] != prev: - highlight_output.append((text[prev:r[2]], None)) - highlight_output.append((text[r[2]:r[2] + len(r[0])], f'{i + 1}')) - color_map[f'{i + 1}'] = r[-1] - prev = r[2] + len(r[0]) - if prev != len(text) - 1: - highlight_output.append((text[prev:], None)) - print("=======> Highlight Output: ", highlight_output) - return highlight_output, match_state, color_map - - -if __name__ == '__main__': - ner = MatchModule(model='gpt-4') - print( - ner('The image shows a resort with a large swimming pool surrounded by lounge chairs and umbrellas. There are several buildings in the background with white walls and blue roofs. There are sand dunes and palm trees in the background indicating that the resort is located in a desert area. 
The sky is clear and blue with a few fluffy clouds in the distance.')) diff --git a/spaces/mateuseap/magic-vocals/lib/infer_pack/modules/F0Predictor/__init__.py b/spaces/mateuseap/magic-vocals/lib/infer_pack/modules/F0Predictor/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/matthoffner/starchat-ui/components/Promptbar/components/PromptModal.tsx b/spaces/matthoffner/starchat-ui/components/Promptbar/components/PromptModal.tsx deleted file mode 100644 index 81bd26cedf428ba31308e0ce40024f0c237c6b0b..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/starchat-ui/components/Promptbar/components/PromptModal.tsx +++ /dev/null @@ -1,130 +0,0 @@ -import { FC, KeyboardEvent, useEffect, useRef, useState } from 'react'; - -import { useTranslation } from 'next-i18next'; - -import { Prompt } from '@/types/prompt'; - -interface Props { - prompt: Prompt; - onClose: () => void; - onUpdatePrompt: (prompt: Prompt) => void; -} - -export const PromptModal: FC = ({ prompt, onClose, onUpdatePrompt }) => { - const { t } = useTranslation('promptbar'); - const [name, setName] = useState(prompt.name); - const [description, setDescription] = useState(prompt.description); - const [content, setContent] = useState(prompt.content); - - const modalRef = useRef(null); - const nameInputRef = useRef(null); - - const handleEnter = (e: KeyboardEvent) => { - if (e.key === 'Enter' && !e.shiftKey) { - onUpdatePrompt({ ...prompt, name, description, content: content.trim() }); - onClose(); - } - }; - - useEffect(() => { - const handleMouseDown = (e: MouseEvent) => { - if (modalRef.current && !modalRef.current.contains(e.target as Node)) { - window.addEventListener('mouseup', handleMouseUp); - } - }; - - const handleMouseUp = (e: MouseEvent) => { - window.removeEventListener('mouseup', handleMouseUp); - onClose(); - }; - - window.addEventListener('mousedown', handleMouseDown); - - return () => { - window.removeEventListener('mousedown', handleMouseDown); - }; - }, [onClose]); - - useEffect(() => { - nameInputRef.current?.focus(); - }, []); - - return ( -
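For the `MatchModule` defined in `match.py` above, `forward` takes the caption text plus an `entity_state` dict whose `grounding['local']` entries map grounded entity names to display colours, and returns highlight spans, a name-to-entity mapping, and a label-to-colour map. The driver below is a hypothetical sketch: the `entity_state` layout is inferred from the attribute accesses in `forward`, it needs `OPENAI_API_KEY` set, and the parsed pairs depend on the live ChatGPT reply.

```python
# Hypothetical driver for the MatchModule defined above; output varies with each ChatGPT response.
entity_state = {
    "grounding": {
        "local": {
            "brown dog": {"color": "red"},
            "field": {"color": "green"},
        }
    }
}

matcher = MatchModule(model="gpt-3.5-turbo")   # raises if OPENAI_API_KEY is not set
highlight, match_state, color_map = matcher(
    "A brown dog running in the grassy field", entity_state
)
# highlight   -> list of (text_span, label) tuples for a highlighted-text widget
# match_state -> e.g. {"brown dog": "brown dog", "grassy field": "field"}
# color_map   -> e.g. {"1": "red", "2": "green"}, colours copied from entity_state
```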
      -
      -
      -