diff --git a/spaces/0xSynapse/Image_captioner/app.py b/spaces/0xSynapse/Image_captioner/app.py
deleted file mode 100644
index 576dcbf9b1147ac748e08fb031d5e2b821434c29..0000000000000000000000000000000000000000
--- a/spaces/0xSynapse/Image_captioner/app.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# import all required libraries
-import streamlit as st
-import torch
-import requests
-from PIL import Image
-from io import BytesIO
-from transformers import ViTFeatureExtractor, AutoTokenizer, VisionEncoderDecoderModel
-
-
-# use a pretrained model hosted on Hugging Face
-loc = "ydshieh/vit-gpt2-coco-en"
-
-feature_extractor = ViTFeatureExtractor.from_pretrained(loc)
-tokenizer = AutoTokenizer.from_pretrained(loc)
-model = VisionEncoderDecoderModel.from_pretrained(loc)
-model.eval()
-
-# define a function for caption prediction
-
-def predict(image):
- pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values
-
- with torch.no_grad():
- output_ids = model.generate(pixel_values, max_length=16, num_beams=4, return_dict_in_generate=True).sequences
-
- preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
- preds = [pred.strip() for pred in preds]
-
- return preds
-
-# define the Streamlit app
-def app():
- st.title("ImaginateAI")
- st.write("ViT and GPT2 are used to generate Image Caption for the uploaded image. COCO Dataset was used for training. This image captioning model might have some biases that I couldn’t figure during testing")
- st.write("Upload an image or paste a URL to get predicted captions.")
-
- upload_option = st.selectbox("Choose an option:", ("Upload Image", "Paste URL"))
-
- if upload_option == "Upload Image":
- uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg"])
-
- if uploaded_file is not None:
- image = Image.open(uploaded_file)
- preds = predict(image)
- st.image(image, caption="Uploaded Image", use_column_width=True)
- st.write("Predicted Caption:", preds)
-
-
- elif upload_option == "Paste URL":
- image_url = st.text_input("Enter Image URL")
- if st.button("Submit") and image_url:
- try:
- response = requests.get(image_url, stream=True)
- image = Image.open(BytesIO(response.content))
- preds = predict(image)
- st.image(image, caption="Image from URL", use_column_width=True)
- st.write("Predicted Caption:", preds)
-            except Exception:
- st.write("Error: Invalid URL or unable to fetch image.")
-
-if __name__ == "__main__":
- app()
\ No newline at end of file
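For reference, the same captioning pipeline can be exercised outside Streamlit. This is a minimal stand-alone sketch, assuming a recent transformers release in which ViTImageProcessor supersedes the deprecated ViTFeatureExtractor; the image path is a hypothetical placeholder.

```python
import torch
from PIL import Image
from transformers import AutoTokenizer, ViTImageProcessor, VisionEncoderDecoderModel

loc = "ydshieh/vit-gpt2-coco-en"
processor = ViTImageProcessor.from_pretrained(loc)  # assumption: recent transformers API
tokenizer = AutoTokenizer.from_pretrained(loc)
model = VisionEncoderDecoderModel.from_pretrained(loc)
model.eval()

def caption(image_path: str) -> str:
    """Return a single beam-search caption for the image at image_path."""
    image = Image.open(image_path).convert("RGB")
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    with torch.no_grad():
        output_ids = model.generate(pixel_values, max_length=16, num_beams=4)
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0].strip()

print(caption("example.jpg"))  # hypothetical local image file
```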
diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/__init__.py
deleted file mode 100644
index a2fc6f5af4a85304b0e23ceb07bfe844fc907f23..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/deepai/__init__.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import requests
-import json
-import hashlib
-import random
-import string
-from fake_useragent import UserAgent
-
-class ChatCompletion:
-    @classmethod
-    def md5(cls, text):
-        return hashlib.md5(text.encode()).hexdigest()[::-1]
-
-    @classmethod
-    def get_api_key(cls, user_agent):
-        part1 = str(random.randint(0, 10**11))
-        part2 = cls.md5(user_agent + cls.md5(user_agent + cls.md5(user_agent + part1 + "x")))
-        return f"tryit-{part1}-{part2}"
-
-    @classmethod
-    def create(cls, messages):
-        user_agent = UserAgent().random
-        api_key = cls.get_api_key(user_agent)
- headers = {
- "api-key": api_key,
- "user-agent": user_agent
- }
- files = {
- "chat_style": (None, "chat"),
- "chatHistory": (None, json.dumps(messages))
- }
-
-        r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
-        r.raise_for_status()
-
-        for chunk in r.iter_content(chunk_size=None):
-            yield chunk.decode()
-
-class Completion:
- @classmethod
-    def create(cls, prompt):
- return ChatCompletion.create([
- {
- "role": "user",
- "content": prompt
- }
- ])
\ No newline at end of file
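A minimal usage sketch for the module removed above, assuming the package is importable as gpt4free.deepai (mirroring the deleted path) and that the api.deepai.org endpoint still behaves as the code expects.

```python
from gpt4free import deepai  # hypothetical import path mirroring the deleted package layout

# ChatCompletion.create streams decoded text chunks from the endpoint.
for chunk in deepai.ChatCompletion.create([{"role": "user", "content": "Hello!"}]):
    print(chunk, end="", flush=True)

# Completion.create wraps a single prompt into a one-message chat history.
print("".join(deepai.Completion.create("Explain unified diffs in one sentence.")))
```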
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Golmaal Again 1080p Hd Hindi Full Fix Movie.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Golmaal Again 1080p Hd Hindi Full Fix Movie.md
deleted file mode 100644
index 529e34e1a0241826de5c5af6a1ff94075ae8a119..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Golmaal Again 1080p Hd Hindi Full Fix Movie.md
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
Golmaal Again: A Hilarious and Spooky Comedy
-
Golmaal Again is a 2017 Hindi comedy film directed by Rohit Shetty and starring Ajay Devgn, Parineeti Chopra, Tabu, Arshad Warsi, Tusshar Kapoor, Shreyas Talpade, Kunal Khemu, Prakash Raj and Neil Nitin Mukesh. It is the fourth installment of the Golmaal film series and a sequel to Golmaal 3 (2010).
-
The film follows the adventures of Gopal, Madhav, Lucky, Laxman 1 and Laxman 2, five friends who grew up in an orphanage and reunite after many years. They discover that their childhood friend Khushi, who they thought was dead, is actually alive and has some supernatural powers. They also encounter a ghost named Anna Mathew, who reveals some shocking secrets about their past.
Golmaal Again is a blend of comedy, horror and action that will keep you entertained throughout. The film has many hilarious scenes, such as the one where Gopal gets scared by a snake or the one where Laxman 2 mimics a lion. The film also has some emotional moments, such as the one where Khushi reunites with her long-lost father or the one where Gopal confesses his love for Khushi.
-
Golmaal Again is a blockbuster hit that grossed over ₹300 crore worldwide and became one of the highest-grossing Indian films of all time. The film received positive reviews from critics and audiences alike, who praised its humor, performances, music and direction. The film also won several awards, including the Filmfare Award for Best Actor in a Comic Role for Ajay Devgn.
-
If you are looking for a fun-filled and spooky movie to watch with your family or friends, Golmaal Again is the perfect choice for you. You can watch Golmaal Again in full HD quality on Disney+ Hotstar or Bilibili. Don't miss this laughter riot that will make you go "Golmaal Golmaal"!
-
-
Golmaal Again is the fourth film in the Golmaal series, which started with Golmaal: Fun Unlimited (2006), followed by Golmaal Returns (2008) and Golmaal 3 (2010). The series is known for its slapstick comedy, quirky characters and catchy songs. The films are loosely inspired by the Hollywood comedy franchise Police Academy.
-
The film features a star-studded cast of actors who have worked together in previous Golmaal films. Ajay Devgn plays Gopal, the leader of the gang who is afraid of ghosts and loves Khushi. Parineeti Chopra plays Khushi, the bubbly and innocent girl who has a special connection with Gopal. Tabu plays Anna Mathew, the librarian and ghost whisperer who helps the gang solve the mystery. Arshad Warsi plays Madhav, the prankster who often clashes with Gopal. Tusshar Kapoor plays Lucky, the mute and funny member of the gang. Shreyas Talpade plays Laxman 1, the stammering and loyal friend of Gopal. Kunal Khemu plays Laxman 2, the smart and witty brother of Laxman 1. Prakash Raj plays Sheru Bhai, the don of Ooty who has a grudge against the gang. Neil Nitin Mukesh plays Nikhil, the son of Sheru Bhai who falls in love with Khushi.
-
The film also has some special appearances by actors such as Sanjay Mishra, Johnny Lever, Mukesh Tiwari, Vrajesh Hirjee, Murali Sharma and Sachin Khedekar. The film has a cameo by Nana Patekar, who lends his voice to the ghost of Jamnadas, the owner of the orphanage where the gang grew up. The film also features a song by Ranveer Singh, who dances with the gang in the end credits.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Calculus By Howard Anton 8th Edition Free !FULL! Download.md b/spaces/1gistliPinn/ChatGPT4/Examples/Calculus By Howard Anton 8th Edition Free !FULL! Download.md
deleted file mode 100644
index 6f0112c7188320bf5808e1ed1582c70bc9bee8c0..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Calculus By Howard Anton 8th Edition Free !FULL! Download.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
Calculus By Howard Anton 8th Edition Free Download
-
-Howard Anton's Handbook of Exercise Calculus, 8th edition. Jun 05, 2015. While we try to keep our promises, we must not forget that we still have to keep our promises. We must remember that not all of our promises are true and that we must be careful when we decide what we promise. This is especially true in the financial world. Our financial promises are not perfect, and they don't have to be. The financial promises don't change.
-
-
-
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Design My Home Makeover Games APK - A Fun and Relaxing Word Game with Home Decoration.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Design My Home Makeover Games APK - A Fun and Relaxing Word Game with Home Decoration.md
deleted file mode 100644
index f909d70498426aa1188480b3fc18d6a95fadd871..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Design My Home Makeover Games APK - A Fun and Relaxing Word Game with Home Decoration.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-
Design My Home: Makeover Games APK - A Fun and Creative Casual Game
-
Do you love home design and word games? If yes, then you will love Design My Home: Makeover Games APK, a casual game that combines both genres in a fun and creative way. In this game, you can design your dream home, solve word puzzles, and compete with other players around the world. Here is everything you need to know about this game, including how to download and install it, what are its features, and what are some tips and tricks for playing it.
-
What is Design My Home: Makeover Games?
-
Design My Home: Makeover Games is a casual game developed by Holy Cow Studio. The APK has been available since May 2020 and has been downloaded more than a million times. It is rated 4.59 out of 5 stars based on 22,537 ratings, although it is not currently among the top-ranked games on Google Play. The game has three main aspects:
A casual game that lets you design your dream home
-
In this game, you can unleash your inner designer and create beautiful rooms according to your taste and style. You can choose from hundreds of levels and rooms, such as living rooms, bedrooms, kitchens, bathrooms, gardens, and more. You can also customize every detail, such as the walls, floors, furniture, decor, lighting, plants, and accessories. You can mix and match different styles, colors, patterns, and textures to create your own unique designs.
-
A word game that challenges your vocabulary and creativity
-
To design each room, you need to solve word puzzles that are related to the theme of the room. For example, if you are designing a kitchen, you need to find words that are associated with cooking, food, utensils, appliances, etc. The word puzzles are in the form of crosswords or word searches. You need to swipe the letters on the screen to form words. You can use hints or shuffle the letters if you are stuck. The more words you find, the more coins you earn.
-
A social game that lets you share your designs and compete with others
-
You can also share your designs with other players and see their ratings and comments. You can also rate and comment on other players' designs and get inspired by their ideas. You can also join clubs and chat with other designers. You can also participate in tournaments and events where you can compete with other players for prizes and glory.
-
How to download and install Design My Home: Makeover Games APK?
-
If you want to play Design My Home: Makeover Games APK on your Android device, you need to follow these steps:
-
Download the APK file from a trusted source
-
You can download the APK file from AppBrain, a reliable website that offers free APK downloads for Android apps. You can also scan the QR code on the website to download the file directly to your device. The APK file size is 99 MB and the latest version is 1.2.9.
-
Enable unknown sources on your device settings
-
Before you can install the APK file, you need to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Tap OK to proceed.
-
Install the APK file and enjoy the game
-
Once you have downloaded and enabled unknown sources, you can install the APK file by tapping on it. You may see a prompt that asks you to confirm the installation. Tap Install and wait for the process to finish. After the installation is complete, you can open the game and start designing your home.
-
What are the features of Design My Home: Makeover Games APK?
-
Design My Home: Makeover Games APK has many features that make it a fun and creative casual game. Here are some of them:
-
design my home makeover word life apk
-design my home makeover games offline apk
-design my home makeover games mod apk
-design my home makeover games free download apk
-design my home makeover games for android apk
-design my home makeover games latest version apk
-design my home makeover games hack apk
-design my home makeover games unlimited money apk
-design my home makeover games 3d apk
-design my home makeover games online apk
-design my home makeover games with friends apk
-design my home makeover games no ads apk
-design my home makeover games premium apk
-design my home makeover games pro apk
-design my home makeover games full apk
-design my home makeover games fun apk
-design my home makeover games best apk
-design my home makeover games new apk
-design my home makeover games update apk
-design my home makeover games puzzle apk
-design my home makeover games simulation apk
-design my home makeover games adventure apk
-design my home makeover games casual apk
-design my home makeover games creative apk
-design my home makeover games realistic apk
-design my home makeover games easy apk
-design my home makeover games challenging apk
-design my home makeover games relaxing apk
-design my home makeover games addictive apk
-design my home makeover games educational apk
-design my home makeover games family apk
-design my home makeover games kids apk
-design my home makeover games adults apk
-design my home makeover games girls apk
-design my home makeover games boys apk
-design my home makeover games cute apk
-design my home makeover games beautiful apk
-design my home makeover games awesome apk
-design my home makeover games amazing apk
-design my home makeover games cool apk
-
Hundreds of levels and rooms to design
-
The game has hundreds of levels and rooms that you can design according to your preferences. You can start with simple rooms like bedrooms and living rooms, and progress to more complex ones like kitchens and gardens. You can also unlock special rooms like studios, spas, cinemas, and more. Each room has a different theme and requirement that you need to follow. For example, some rooms may require you to use a certain style or color scheme, while others may give you more freedom to express yourself.
-
Thousands of furniture and decor items to choose from
-
The game has thousands of furniture and decor items that you can use to decorate your rooms. You can choose from different categories, such as sofas, beds, tables, chairs, lamps, rugs, paintings, plants, and more. You can also filter the items by style, color, price, or rating. You can also preview the items before you buy them with your coins. You can also rotate, resize, or move the items to fit your design.
-
Daily rewards and bonuses to boost your progress
-
The game also gives you daily rewards and bonuses that can help you progress faster in the game. You can get free coins, hints, shuffles, stars, or items every day by logging in, watching ads, or completing tasks. You can also get extra rewards by spinning the wheel of fortune or opening the mystery box. You can use these rewards to buy more items, solve more puzzles, or unlock more rooms.
-
Offline mode and cloud save support
-
The game also supports offline mode and cloud save features. This means that you can play the game without an internet connection and your progress will be saved automatically. You can also sync your progress across different devices by logging in with your Facebook account. This way, you can enjoy the game anytime and anywhere.
-
What are the tips and tricks for playing Design My Home: Makeover Games APK?
-
If you want to master Design My Home: Makeover Games APK, here are some tips and tricks that you can follow:
-
Use hints and shuffle when you are stuck on a word puzzle
-
Sometimes, you may find it hard to solve a word puzzle because you don't know the word or you can't see it on the screen. In that case, you can use hints or shuffle to help you out. Hints will reveal one letter of the word for you, while shuffle will rearrange the letters on the screen. You can get hints or shuffle by spending coins or watching ads.
-
Follow the design themes and requirements for each room
-
Each room in the game has a specific theme and requirement that you need to follow in order to complete it. For example, some rooms may require you to use a certain style or color scheme, while others may give you more freedom to express yourself. You can see the theme and requirement at the top of the screen when you enter a room. You can also see how many stars you need to earn in order to finish the room.
-
Collect stars and coins to unlock new items and rooms
-
Stars and coins are the main currencies in the game that you need to collect in order to unlock new items and rooms. You can earn stars by completing word puzzles and designing rooms according to their themes and requirements. You can earn coins by finding words, watching ads, spinning the wheel of fortune, opening the mystery box, or completing tasks. You can also buy coins with real money if you want.
-
Watch ads and complete tasks to earn extra rewards
-
Another way to earn more stars and coins in the game is to watch ads and complete tasks. You can watch ads to get free hints, shuffles, coins, or items. You can also complete tasks that are given to you by the game or by other players. These tasks may involve designing a specific room, finding a certain word, or rating other players' designs. You can get rewards such as coins, stars, items, or badges for completing these tasks.
-
Conclusion
-
Design My Home: Makeover Games APK is a fun and creative casual game that lets you design your dream home, solve word puzzles, and compete with other players. You can download and install the APK file from a trusted source and enjoy the game on your Android device. You can also explore the features of the game, such as hundreds of levels and rooms, thousands of furniture and decor items, daily rewards and bonuses, offline mode and cloud save support, and more. You can also follow some tips and tricks to master the game, such as using hints and shuffle, following the design themes and requirements, collecting stars and coins, watching ads and completing tasks, and more. If you love home design and word games, you should give Design My Home: Makeover Games APK a try.
-
FAQs
-
Here are some frequently asked questions about Design My Home: Makeover Games APK:
-
Q: Is Design My Home: Makeover Games APK safe to download and install?
-
A: Yes, Design My Home: Makeover Games APK is safe to download and install as long as you get it from a trusted source like AppBrain. You should also enable unknown sources on your device settings before installing the APK file.
-
Q: How can I update Design My Home: Makeover Games APK?
-
A: You can update Design My Home: Makeover Games APK by downloading the latest version of the APK file from AppBrain or by checking for updates within the game. You should always update the game to enjoy the latest features and bug fixes.
-
Q: How can I contact the developer of Design My Home: Makeover Games APK?
-
A: You can contact the developer of Design My Home: Makeover Games APK by sending an email to support@holycow.studio or by visiting their website at https://holycow.studio/. You can also follow them on Facebook at https://www.facebook.com/holycowstudio/ or on Instagram at https://www.instagram.com/holycowstudio/.
-
Q: How can I delete Design My Home: Makeover Games APK from my device?
-
A: You can delete Design My Home: Makeover Games APK from your device by going to Settings > Apps > Design My Home > Uninstall. You can also delete the APK file from your device storage if you don't need it anymore.
-
Q: Can I play Design My Home: Makeover Games APK on my PC or laptop?
-
A: Yes, you can play Design My Home: Makeover Games APK on your PC or laptop by using an Android emulator like BlueStacks or NoxPlayer. These emulators will allow you to run Android apps on your PC or laptop. However, you may experience some performance issues or compatibility problems depending on your device specifications.
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bloons TD 6 on PC and Experience the Most Epic Tower Defense Game.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bloons TD 6 on PC and Experience the Most Epic Tower Defense Game.md
deleted file mode 100644
index 0be8409207ba124514923c8e14d5f51b3678dcc7..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bloons TD 6 on PC and Experience the Most Epic Tower Defense Game.md
+++ /dev/null
@@ -1,152 +0,0 @@
-
-
Bloons TD 6: A Fun and Challenging Tower Defense Game
-
If you are a fan of tower defense games, you might have heard of Bloons TD 6, the latest installment in the Bloons Tower Defense series by Ninja Kiwi. Bloons TD 6 is a strategy game that challenges you to pop all the invading balloons (or bloons) before they reach the end of the track. You can use a variety of monkey towers, heroes, and powers to stop them. But be careful, as the bloons come in different shapes, sizes, and abilities, and some of them are very tough to pop.
In this article, we will give you an overview of what Bloons TD 6 is, how to download and play it on your PC, and how to master it with some useful tips and tricks. Let's get started!
-
What is Bloons TD 6?
-
Bloons TD 6 is a strategy based video game developed and published by Ninja Kiwi. It was released on June 13, 2018 for Android and iOS, and later brought to Steam for Windows and Macintosh. It is the sixth main game in the Bloons Tower Defense series, which started in 2007 as a web browser game.
-
Bloons TD 6 follows the same tower defense formula as its predecessors, but it also introduces some new features and improvements that make it more fun and challenging. Here are some of the main aspects of the game:
-
The basic gameplay of Bloons TD 6
-
The core gameplay of Bloons TD 6 is simple: you have to prevent the bloons from reaching the end of the track by placing monkey towers along the way. Each monkey tower has a different attack range, rate, damage, and cost. You can also upgrade your towers to make them more powerful or give them special abilities.
-
There are four classes of monkey towers: Primary, Military, Magic, and Support. Each class has its own strengths and weaknesses against different types of bloons. For example, Primary towers are good at popping regular bloons, but they struggle against lead or camo bloons. Military towers are good at popping camo bloons, but they are weak against purple bloons. Magic towers can pop almost any bloon, but they are expensive and have low pierce. Support towers do not attack directly, but they provide buffs or debuffs to other towers or bloons.
-
bloons td 6 pc download full version free
-how to get bloons td 6 for free on pc
-bloons td 6 free download windows 10
-bloons td 6 pc game free download
-bloons tower defense 6 free download pc
-bloons td 6 online free no download pc
-bloons td 6 steam free download pc
-bloons td 6 apk free download pc
-bloons td 6 cracked download pc free
-bloons td 6 mod free download pc
-bloons td 6 emulator free download pc
-bloons td 6 bluestacks free download pc
-bloons td 6 play online for free on pc
-bloons td 6 strategy game free download pc
-bloons td 6 latest version free download pc
-bloons td 6 update free download pc
-bloons td 6 multiplayer free download pc
-bloons td 6 co op free download pc
-bloons td 6 sandbox mode free download pc
-bloons td 6 cheats free download pc
-bloons td 6 hack free download pc
-bloons td 6 trainer free download pc
-bloons td 6 save file free download pc
-bloons td 6 custom maps free download pc
-bloons td 6 editor free download pc
-bloons td 6 best towers free download pc
-bloons td 6 tier list free download pc
-bloons td 6 heroes guide free download pc
-bloons td 6 tips and tricks free download pc
-bloons td 6 walkthrough free download pc
-bloons td 6 achievements free download pc
-bloons td 6 challenges free download pc
-bloons td 6 daily rewards free download pc
-bloons td 6 monkey knowledge free download pc
-bloons td 6 insta monkeys free download pc
-bloons td 6 skins free download pc
-bloons td 6 soundtrack free download pc
-bloons td 6 wallpaper free download pc
-bloons td 6 review free download pc
-bloons td 6 reddit free download pc
-bloons td 6 discord server free download pc
-bloons td 6 wiki free download pc
-bloons td 6 official website free download pc
-bloons td 6 system requirements free download pc
-how to install bloons td 6 for free on pc
-how to play bloons td 6 offline for free on pc
-how to transfer bloons td 6 data for free on pc
-how to unlock all monkeys in bloons td 6 for free on pc
-how to get unlimited money in bloons td 6 for free on pc
-
You can choose from several game modes and difficulty levels to suit your preference and skill level. The game modes include Standard (the normal mode), Impoppable (the hardest mode), CHIMPS (a mode where you cannot use Continues, Hearts Lost, Income, Monkey Knowledge, Powers or Selling), Sandbox (a mode where you can test your strategies with unlimited money and lives), Races (a mode where you compete with other players to finish a map as fast as possible), Co-Op (a mode where you can team up with up to three other players), Odysseys (a mode where you have to complete a series of maps with limited tower choices), Boss Events (a mode where you have to face powerful boss bloons), Contested Territory (a 2Ghz or better | | Memory: 4096 MB RAM | Memory: 8192 MB RAM | | Graphics: OpenGL 2.0 compatible, ATI, Nvidia or Intel HD | Graphics: OpenGL 2.0 compatible, ATI, Nvidia or Intel HD | | Storage: 2048 MB available space | Storage: 4096 MB available space | | Sound Card: Windows compatible sound card | Sound Card: Windows compatible sound card |
The steps to download and install Bloons TD 6 on PC
-
Depending on your preference, you can choose one of the following methods to download and install Bloons TD 6 on your PC:
-
Using BlueStacks emulator
-
BlueStacks is a popular Android emulator that allows you to run Android apps and games on your PC. You can use BlueStacks to play Bloons TD 6 on your PC with the same features and performance as on your mobile device. Here are the steps to do this:
Launch BlueStacks and sign in with your Google account.
-
Go to the Google Play Store app and search for Bloons TD 6.
-
Click on the Install button and wait for the download and installation to complete.
-
Go to the My Apps tab and click on the Bloons TD 6 icon to launch the game.
-
Enjoy playing Bloons TD 6 on your PC with BlueStacks.
-
-
Using Steam or Epic Games store
-
Steam and Epic Games store are two of the most popular platforms for PC gaming. You can use either of them to buy and play Bloons TD 6 on your PC with enhanced graphics and controls. Here are the steps to do this:
Create an account and sign in to Steam or Epic Games store.
-
Go to the Store page and search for Bloons TD 6.
-
Click on the Add to Cart button and proceed to checkout.
-
Pay for the game using your preferred payment method.
-
Go to the Library page and click on the Bloons TD 6 icon to launch the game.
-
Enjoy playing Bloons TD 6 on your PC with Steam or Epic Games store.
-
-
How to master Bloons TD 6?
-
Bloons TD 6 is a fun and challenging game that requires strategy, skill, and creativity. If you want to master the game and beat all the levels, modes, and challenges, you need to learn some tips and tricks that can help you improve your gameplay. Here are some of them:
-
The best strategies, tips, and tricks for Bloons TD 6
-
Bloons TD 6 is a game that has many variables and possibilities. There is no one best strategy or solution for every situation. However, there are some general principles and guidelines that can help you make better decisions and optimize your performance. Here are some of them:
-
Choosing the right monkeys, upgrades, and heroes
-
The first step to mastering Bloons TD 6 is choosing the right monkeys, upgrades, and heroes for each map, mode, and difficulty. You need to consider several factors such as the track layout, the bloon types, the available money, the tower restrictions, and the synergy between different towers. You also need to experiment with different combinations and see what works best for you.
-
Some of the most popular and effective monkeys in Bloons TD 6 are:
-
-
Ninja Monkey: A versatile tower that can pop camo bloons, throw shurikens at high speed, slow down bloons with caltrops or sabotage supply lines, and deal massive damage with grandmaster ninja or master bomber upgrades.
-
Dartling Gunner: A powerful tower that can shoot darts in any direction you point your cursor, pierce through multiple bloons with faster barrel spin or hydro rocket pods upgrades, deal extra damage to MOAB-class bloons with mad shredder or rocket storm upgrades, and unleash a devastating ray of doom or plasma accelerator upgrade.
-
Sun Avatar: A super monkey that can shoot sun beams that pop multiple layers of bloons, create mini sun avatars with sun temple or sun temple upgrades, and become the ultimate bloon destroyer with the true sun god or the legend of the night upgrade.
-
Alchemist: A support tower that can buff other towers with acid mixture dip or stronger stimulant upgrades, pop lead and fortified bloons with acidic mixture dip or unstable concoction upgrades, generate extra income with rubber to gold or lead to gold upgrades, and transform into a powerful spellcaster with total transformation or permanent brew upgrades.
-
Banana Farm: An income tower that can produce bananas that give you money when collected, increase your banana production with greater production or valuable bananas upgrades, create banana crates or banks that store more money, and generate more income with monkey-nomics or monkey wall street upgrades.
-
-
Some of the most popular and effective heroes in Bloons TD 6 are:
-
-
Quincy: A well-rounded hero that can shoot multiple arrows at once, deal extra damage to MOAB-class bloons with explosive arrows or storm of arrows abilities, and boost the attack speed and pierce of nearby primary monkeys with rapid shot or arrow barrage abilities.
-
Gwendolin: A fiery hero that can set bloons on fire, deal extra damage to ceramic and fortified bloons with firestorm or cocktail of fire abilities, and boost the damage and range of nearby fire-based monkeys with heat it up or firestorm abilities.
-
Benjamin: A hacker hero that can generate extra income with skimming or biohack abilities, hack bloons to make them weaker or give more money with trojan or syphon funding abilities, and disable the abilities of nearby monkeys with bloon trojan or biohack abilities.
-
Adora: A divine hero that can shoot powerful bolts of light, deal massive damage to all bloons on screen with ball of light or blood sacrifice abilities, and sacrifice nearby towers to gain power and level up faster with blood sacrifice or true sun god abilities.
-
-
Placing your towers wisely and using abilities effectively
-
The second step to mastering Bloons TD 6 is placing your towers wisely and using their abilities effectively. You need to consider several factors such as the track layout, the line of sight, the range, the cost, the synergy, and the timing. You also need to experiment with different placements and see what works best for you.
-
Some of the general tips for placing your towers are:
-
-
Place your towers near the start or the end of the track to maximize their attack time and damage output.
-
Place your towers near curves or intersections to maximize their attack range and pierce.
-
Place your towers on elevated platforms or water spots to avoid line-of-sight issues and gain access to exclusive towers.
-
Place your towers in clusters or groups to benefit from buffs or debuffs from other towers.
-
Place your towers strategically to cover different types of bloons and create choke points.
-
-
Some of the general tips for using your abilities are:
-
-
Use your abilities when you are facing a large wave of bloons or a tough boss bloon.
-
Use your abilities in combination with other abilities or powers to create a powerful effect.
-
Use your abilities sparingly and wisely, as they have a cooldown time and a limited number of uses per game.
-
Use your abilities according to the situation and the type of ability. For example, use offensive abilities to deal damage, defensive abilities to protect yourself, support abilities to buff yourself or debuff enemies, and income abilities to generate money.
-
-
Gaining experience and unlocking new content
-
The third step to mastering Bloons TD 6 is gaining experience and unlocking new content. You need to play the game regularly and complete different levels, modes, and challenges to earn experience points (XP) and monkey money (MM). You can use XP to unlock new towers, upgrades, heroes, maps, modes, and achievements. You can use MM to buy new powers, insta-monkeys, skins, trophies, and more. You can also earn trophies by completing achievements or participating in events. You can use trophies to buy cosmetic items from the trophy store.
-
Some of the ways to gain more XP and MM are:
-
-
Play on higher difficulty levels and harder game modes to earn more XP and MM per game.
-
Play on different maps and use different towers and heroes to earn more XP for each tower and hero class.
-
Play on co-op mode and team up with other players to earn more XP and MM per game.
-
Play on races mode and compete with other players to earn more XP and MM per game.
-
Play on odysseys mode and complete a series of maps with limited tower choices to earn more XP and MM per game.
-
Play on boss events mode and defeat powerful boss bloons to earn more XP and MM per game.
-
Play on daily challenges mode and complete maps with specific rules and restrictions to earn more XP and MM per game.
-
Use monkey knowledge points (MKP) to unlock passive bonuses and perks for your towers, heroes, powers, and income.
-
-
Conclusion
-
Bloons TD 6 is a fun and challenging tower defense game that offers hours of entertainment and replay value. You can enjoy the game on your mobile device or your PC, and you can customize your game experience with various features and options. You can also improve your skills and strategies by learning from the tips and tricks we shared in this article. We hope you found this article helpful and informative, and we wish you good luck and have fun popping bloons!
-
FAQs
-
Here are some of the frequently asked questions about Bloons TD 6:
-
-
Q: How much does Bloons TD 6 cost?
-
A: Bloons TD 6 costs $4.99 on Android and iOS, $9.99 on Steam, and $14.99 on Epic Games store. However, the game often goes on sale or offers discounts, so you can check the prices regularly to find the best deal.
-
Q: Is Bloons TD 6 online or offline?
-
A: Bloons TD 6 can be played both online and offline. You can play online to access all the features and content of the game, such as co-op mode, races mode, boss events mode, content browser, trophy store, etc. You can also play offline to enjoy the game without an internet connection, but you will not be able to access some of the features and content of the game.
-
Q: Is Bloons TD 6 cross-platform?
-
A: Bloons TD 6 is cross-platform between Android, iOS, Windows, and Macintosh. You can play with other players or transfer your progress across different devices using the same Ninja Kiwi account. However, Bloons TD 6 is not cross-platform with Epic Games store, so you cannot play with or transfer your progress to or from Epic Games store users.
-
Q: Is Bloons TD 6 free to play?
-
A: Bloons TD 6 is not free to play, as you have to buy the game to play it. However, Bloons TD 6 does not have any in-app purchases or microtransactions that require real money. You can earn all the in-game currency and items by playing the game normally.
-
Q: Is Bloons TD 6 multiplayer?
-
A: Bloons TD 6 is multiplayer, as you can play with up to three other players in co-op mode. You can also compete with other players in races mode or contested territory mode. You can also chat with other players using the in-game chat feature or join a clan to socialize with other players.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Baixe o Livro de Regras do RPG Ordem Paranormal criado por Cellbit e seus amigos.md b/spaces/1phancelerku/anime-remove-background/Baixe o Livro de Regras do RPG Ordem Paranormal criado por Cellbit e seus amigos.md
deleted file mode 100644
index eee32dd6e44472a886106605b6ff2d1e320b6dd5..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Baixe o Livro de Regras do RPG Ordem Paranormal criado por Cellbit e seus amigos.md
+++ /dev/null
@@ -1,124 +0,0 @@
-
-
Livro de Regras RPG Cellbit PDF Download: Tudo o que você precisa saber
-
Você é fã do RPG Ordem Paranormal, criado pelo youtuber Cellbit? Quer saber como baixar o livro de regras oficial do jogo em PDF? Então você veio ao lugar certo. Neste artigo, vamos te contar tudo o que você precisa saber sobre o livro de regras RPG Cellbit PDF download, incluindo:
Como baixar o livro de regras oficial do jogo em PDF;
-
Quais são as principais novidades e características do livro;
-
Onde assistir as campanhas de Cellbit e seus convidados;
-
Como adaptar o cenário para outros sistemas de RPG.
-
-
Ficou curioso? Então continue lendo e descubra tudo sobre o livro de regras RPG Cellbit PDF download.
-
O que é o RPG Ordem Paranormal e como jogar
-
O RPG Ordem Paranormal é um jogo de mesa criado por Rafael Lange, mais conhecido como Cellbit, um dos maiores streamers e youtubers do Brasil. O jogo se passa em um universo onde a realidade é ameaçada por criaturas sobrenaturais do Outro Lado, e os jogadores interpretam agentes da Ordem da Realidade, uma organização secreta que luta para proteger o mundo dos perigos paranormais.
-
O jogo é baseado no sistema d20, o mesmo usado em Dungeons & Dragons e Pathfinder, mas com algumas adaptações e inovações feitas por Cellbit e sua equipe. O jogo usa dados de 20 lados (d20) para resolver as ações dos personagens, e cada personagem tem atributos, habilidades, equipamentos e rituais que definem suas capacidades e características.
-
livro de regras rpg cellbit pdf grátis
-como baixar o livro de regras rpg cellbit
-ordem paranormal rpg livro de regras pdf
-livro de regras rpg cellbit online
-livro de regras rpg cellbit jambo editora
-resenha do livro de regras rpg cellbit
-livro de regras rpg cellbit capa dura
-livro de regras rpg cellbit versão final
-livro de regras rpg cellbit ficha de personagem
-livro de regras rpg cellbit preço
-livro de regras rpg cellbit comprar
-livro de regras rpg cellbit pré-venda
-livro de regras rpg cellbit lançamento
-livro de regras rpg cellbit sinopse
-livro de regras rpg cellbit pdf completo
-livro de regras rpg cellbit epub
-livro de regras rpg cellbit mobi
-livro de regras rpg cellbit amazon
-livro de regras rpg cellbit mercado livre
-livro de regras rpg cellbit submarino
-livro de regras rpg cellbit americanas
-livro de regras rpg cellbit saraiva
-livro de regras rpg cellbit cultura
-livro de regras rpg cellbit travessa
-livro de regras rpg cellbit martins fontes
-livro de regras rpg cellbit leitura
-livro de regras rpg cellbit pdf drive
-livro de regras rpg cellbit pdf mega
-livro de regras rpg cellbit pdf mediafire
-livro de regras rpg cellbit pdf 4shared
-livro de regras rpg cellbit pdf dropbox
-livro de regras rpg cellbit pdf archive.org
-livro de regras rpg cellbit pdf studocu
-livro de regras rpg cellbit pdf scribd
-livro de regras rpg cellbit pdf academia.edu
-resumo do livro de regras rpg cellbit pdf
-análise do livro de regras rpg cellbit pdf
-crítica do livro de regras rpg cellbit pdf
-opinião sobre o livro de regras rpg cellbit pdf
-comentários sobre o livro de regras rpg cellbit pdf
-dicas para o livro de regras rpg cellbit pdf
-guia para o livro de regras rpg cellbit pdf
-tutorial para o livro de regras rpg cellbit pdf
-vídeo sobre o livro de regras rpg cellbit pdf
-podcast sobre o livro de regras rpg cellbit pdf
-blog sobre o livro de regras rpg cellbit pdf
-site sobre o livro de regras rpg cellbit pdf
-fórum sobre o livro de regras rpg cellbit pdf
-grupo sobre o livro de regras rpg cellbit pdf
-comunidade sobre o livro de regras rpg cellbit pdf
-
O jogo também tem um forte foco na narrativa e na interpretação dos personagens, incentivando os jogadores a criarem histórias envolventes e imersivas. O jogo é dividido em capítulos, que são sessões de jogo com um objetivo definido. Cada capítulo pode ser jogado em uma ou mais sessões, dependendo do ritmo e do estilo dos jogadores e do mestre.
-
Como baixar o livro de regras oficial do jogo em PDF
-
O livro de regras oficial do RPG Ordem Paranormal foi produzido por Cellbit em parceria com a editora Jambô, e está disponível para compra no site da editora. O livro tem mais de 300 páginas e contém todas as regras para criar personagens, conduzir as aventuras, usar os rituais e enfrentar os inimigos do Outro Lado.
-
Além disso, o livro também traz informações sobre o cenário do jogo, incluindo a história da Ordem da Realidade, as principais organizações e facções do mundo, os tipos de criaturas sobrenaturais que existem no Outro Lado e as regiões onde as aventuras podem se passar.
-
Para baixar o livro de regras RPG Cellbit PDF download, você precisa comprar o livro físico no site da Jambô. Ao fazer isso, você recebe um link para baixar o arquivo em PDF do livro. Você pode ler o livro no seu computador, tablet ou celular, ou imprimir uma cópia para usar nas suas sessões de jogo.
-
Quais são as principais novidades e características do livro
-
O livro de regras RPG Cellbit PDF download traz várias novidades e características que tornam o jogo único e divertido. Algumas delas são:
-
-
O sistema de flashbacks: uma mecânica que permite aos jogadores voltarem no tempo para mostrar cenas que aconteceram antes ou durante a aventura, revelando aspectos importantes dos personagens e da trama;
-
O sistema de rituais: uma mecânica que permite aos personagens usarem o poder do Outro Lado para realizar feitos extraordinários, como invocar entidades, manipular a realidade, curar ferimentos e muito mais. Os rituais são baseados em símbolos, ingredientes e palavras de poder, e exigem um teste de habilidade para serem bem-sucedidos;
-
O sistema de sanidade: uma mecânica que representa o impacto psicológico que as situações paranormais causam nos personagens. Os personagens podem perder pontos de sanidade ao presenciarem cenas de horror, violência ou mistério, e podem sofrer consequências como medos, fobias, alucinações e até mesmo insanidade;
-
O sistema de equipamentos: uma mecânica que permite aos personagens usarem diversos tipos de itens para auxiliar nas suas missões, como armas, veículos, dispositivos tecnológicos, itens mágicos e muito mais. Os equipamentos têm características como custo, peso, dano, alcance e efeitos especiais;
-
O sistema de criação de personagens: uma mecânica que permite aos jogadores criarem seus próprios agentes da Ordem da Realidade, escolhendo entre seis arquétipos (Atirador, Combatente, Detetive, Hacker, Médico e Ocultista), seis origens (Americano, Brasileiro, Chinês, Europeu, Indiano e Russo), seis especializações (Armas Brancas, Armas de Fogo, Conhecimento Paranormal, Hacking, Medicina e Rituais) e seis traços (Ambicioso, Corajoso, Curioso, Leal, Protetor e Sarcástico). Os personagens também têm um nome, uma aparência, um histórico e uma motivação pessoal.
-
-
Onde assistir as campanhas de Cellbit e seus convidados
-
Se você quer se inspirar para jogar o RPG Ordem Paranormal ou simplesmente se divertir assistindo as aventuras de Cellbit e seus convidados, você pode acompanhar as campanhas que ele transmite em seu canal do YouTube. Até o momento, ele já realizou três campanhas completas:
-
-
A primeira campanha foi chamada de O Segredo na Floresta e teve 10 capítulos. Ela contou com a participação dos youtubers T3ddy, Saiko, MeiaUm, Pk Regular Game e Gab Araújo. A história girou em torno de um grupo de amigos que foi acampar em uma floresta misteriosa e acabou se envolvendo em uma trama sobrenatural cheia de suspense e terror;
-
A segunda campanha foi chamada de Vampiro a Máscara e teve 12 capítulos. Ela contou com a participação dos youtubers Calango, Felps, Luba, Maethe e Rodrigo Coelho. A história se baseou no cenário de Vampiro: A Máscara, um clássico RPG de horror pessoal, e acompanhou a jornada de um grupo de vampiros recém-criados que tentaram sobreviver em uma cidade dominada por intrigas, conspirações e conflitos entre as seitas vampíricas;
-
A terceira campanha foi chamada de Alice e teve 13 capítulos. Ela contou com a participação dos youtubers Alan, Edu, Guaxinim, Jean L e Mariana. A história foi inspirada no livro Alice no País das Maravilhas, de Lewis Carroll, e seguiu as aventuras de uma garota que foi transportada para um mundo fantástico e bizarro, onde teve que enfrentar diversos desafios e perigos para encontrar o seu caminho de volta.
-
-
Você pode assistir todas essas campanhas no canal do Cellbit no YouTube, na playlist chamada RPG Ordem Paranormal. Você também pode conferir as artes, os memes, os comentários e as teorias dos fãs nas redes sociais, usando as hashtags #OrdemParanormal, #OSegredoNaFloresta, #VampiroAMascara e #Alice.
-
Como adaptar o cenário para outros sistemas de RPG
-
Se você gostou do cenário do RPG Ordem Paranormal, mas prefere usar outro sistema de RPG para jogar, não se preocupe. Você pode adaptar o cenário para o sistema que você quiser, usando algumas dicas simples:
-
-
Escolha um sistema que seja compatível com o gênero e o tom do cenário. Por exemplo, se você quer jogar uma aventura de terror e suspense, você pode usar sistemas como Call of Cthulhu, World of Darkness ou GURPS Horror. Se você quer jogar uma aventura de ação e aventura, você pode usar sistemas como Savage Worlds, Fate ou GURPS Action;
-
Use as regras do sistema escolhido para criar os personagens, os inimigos, os equipamentos e os rituais. Você pode usar as informações do livro de regras RPG Cellbit PDF download como referência, mas não precisa seguir tudo à risca. Você pode modificar ou simplificar as regras conforme a sua preferência e a necessidade da sua história;
-
Use a sua criatividade para criar as suas próprias aventuras ou adaptar as aventuras existentes. Você pode usar as campanhas de Cellbit como inspiração, mas não precisa copiar tudo exatamente. Você pode mudar os personagens, os locais, os eventos e os desfechos conforme o seu gosto e o dos seus jogadores.
-
-
O importante é se divertir e aproveitar o cenário do RPG Ordem Paranormal da melhor forma possível.
-
Conclusão
-
Neste artigo, você aprendeu tudo o que precisa saber sobre o livro de regras RPG Cellbit PDF download. Você viu o que é o RPG Ordem Paranormal e como jogar, como baixar o livro de regras oficial do jogo em PDF, quais são as principais novidades e características do livro, onde assistir as campanhas de Cellbit e seus convidados e como adaptar o cenário para outros sistemas de RPG.
-
Agora você está pronto para se tornar um agente da Ordem da Realidade e viver aventuras incríveis no universo do RPG Ordem Paranormal. Esperamos que você tenha gostado deste artigo e que ele tenha sido útil para você. Se você tiver alguma dúvida ou sugestão, deixe um comentário abaixo. E se você quiser ler mais artigos sobre RPGs e outros assuntos interessantes, continue acompanhando o nosso blog.
-
FAQs
-
O que é o Outro Lado?
-
O Outro Lado é a dimensão paralela onde habitam as criaturas sobrenaturais que ameaçam a realidade. O Outro Lado é um lugar caótico, escuro e perigoso, onde as leis da física e da lógica não se aplicam. O Outro Lado pode ser acessado por meio de portais, rituais ou eventos anômalos, mas é um lugar hostil e mortal para os humanos.
-
Quem é Cellbit?
-
Cellbit é o criador do RPG Ordem Paranormal e um dos maiores streamers e youtubers do Brasil. Ele começou a fazer vídeos de jogos em 2012, e desde então ganhou milhões de fãs e seguidores. Ele é conhecido por seu humor, sua criatividade e sua paixão por RPGs. Ele também é o fundador da Cellbit Produções, uma empresa de entretenimento que produz conteúdo para diversas plataformas.
-
O que é a Jambô?
-
A Jambô é a editora parceira de Cellbit na produção do livro de regras RPG Cellbit PDF download. A Jambô é uma das maiores editoras de RPG do Brasil, responsável por publicar títulos como Tormenta, 3D&T, Dragon Age, Mutantes & Malfeitores, Reinos de Ferro e muitos outros. A Jambô também publica livros de ficção, quadrinhos e revistas especializadas em RPGs e cultura nerd.
-
Como jogar RPG online?
-
Se você quer jogar RPG online com seus amigos, existem várias ferramentas que podem te ajudar. Algumas delas são:
-
-
Rolz: um site que permite criar salas de chat com dados virtuais, ideal para jogar RPGs simples e rápidos;
-
Discord: um aplicativo de comunicação por voz e texto que permite criar servidores com canais dedicados para jogar RPGs, além de integrar bots e plugins que facilitam o jogo;
-
Roll20: uma plataforma online que permite criar mesas de jogo virtuais com mapas, fichas, dados, músicas e muito mais, simulando uma experiência completa de RPG;
-
RPG2ic: um aplicativo brasileiro que permite jogar RPGs pelo celular, com chat, dados, fichas e recursos para criar e compartilhar aventuras.
-
-
Como aprender mais sobre RPGs?
-
Se você quer aprender mais sobre RPGs, existem várias fontes de informação e entretenimento que podem te ajudar. Algumas delas são:
-
-
Livros: existem vários livros que ensinam as regras, os conceitos e as dicas para jogar RPGs, como o Livro dos Jogadores, o Livro do Mestre e o Livro dos Monstros de Dungeons & Dragons, o Manual Básico de 3D&T Alpha, o Guia do Jogador de Tormenta20 e o Guia da Trilogia de Vampiro: A Máscara;
-
Vídeos: existem vários canais no YouTube que falam sobre RPGs, como o Nerdcast RPG, o Formação Fireball, o Covil do RPG, o Perdidos no Play e o próprio canal do Cellbit;
-
Podcasts: existem vários podcasts que falam sobre RPGs, como o Podcast dos Reinos, o Rolando 20, o Papo Furado na Taverna, o Taverna do Beholder Cego e o Podcast da Ordem Paranormal;
-
Blogs: existem vários blogs que falam sobre RPGs, como o Joga o Dado, o Mundos Colidem, o Pensotopia, o RPGista e o Paragons;
-
Revistas: existem várias revistas que falam sobre RPGs, como a Dragão Brasil, a Dungeon Magazine, a Dragon Magazine e a Roleplaying Tips.
-"""
-demo = gr.Interface(
- inference, [
- gr.inputs.Image(type="filepath", label="Input"),
- # gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer', 'CodeFormer'], type="value", default='v1.4', label='version'),
- gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer','CodeFormer','RealESR-General-x4v3'], type="value", default='v1.4', label='version'),
- gr.inputs.Number(label="Rescaling factor", default=2),
- # gr.Slider(0, 100, label='Weight, only for CodeFormer. 0 for better quality, 100 for better identity', default=50)
- ], [
- gr.outputs.Image(type="numpy", label="Output (The whole image)"),
- gr.outputs.File(label="Download the output image")
- ],
- title=title,
- description=description,
- article=article,
- # examples=[['AI-generate.jpg', 'v1.4', 2, 50], ['lincoln.jpg', 'v1.4', 2, 50], ['Blake_Lively.jpg', 'v1.4', 2, 50],
- # ['10045.png', 'v1.4', 2, 50]]).launch()
- examples=[['a1.jpg', 'v1.4', 2], ['a2.jpg', 'v1.4', 2], ['a3.jpg', 'v1.4', 2],['a4.jpg', 'v1.4', 2]])
-
-demo.queue(concurrency_count=4)
-demo.launch()
\ No newline at end of file
diff --git "a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" "b/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py"
deleted file mode 100644
index ffbb05599ef09c9de25334ebeca2eef8022b9aaf..0000000000000000000000000000000000000000
--- "a/spaces/Ameaou/academic-chatgpt3.1/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py"
+++ /dev/null
@@ -1,160 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive
-
-fast_debug = False
-
-def readPdf(pdfPath):
- """
- 读取pdf文件,返回文本内容
- """
- import pdfminer
- from pdfminer.pdfparser import PDFParser
- from pdfminer.pdfdocument import PDFDocument
- from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed
- from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
- from pdfminer.pdfdevice import PDFDevice
- from pdfminer.layout import LAParams
- from pdfminer.converter import PDFPageAggregator
-
- fp = open(pdfPath, 'rb')
-
- # Create a PDF parser object associated with the file object
- parser = PDFParser(fp)
-
- # Create a PDF document object that stores the document structure.
- # Password for initialization as 2nd parameter
- document = PDFDocument(parser)
- # Check if the document allows text extraction. If not, abort.
- if not document.is_extractable:
- raise PDFTextExtractionNotAllowed
-
- # Create a PDF resource manager object that stores shared resources.
- rsrcmgr = PDFResourceManager()
-
- # Create a PDF device object.
- # device = PDFDevice(rsrcmgr)
-
- # BEGIN LAYOUT ANALYSIS.
- # Set parameters for analysis.
- laparams = LAParams(
- char_margin=10.0,
- line_margin=0.2,
- boxes_flow=0.2,
- all_texts=False,
- )
- # Create a PDF page aggregator object.
- device = PDFPageAggregator(rsrcmgr, laparams=laparams)
- # Create a PDF interpreter object.
- interpreter = PDFPageInterpreter(rsrcmgr, device)
-
- # loop over all pages in the document
- outTextList = []
- for page in PDFPage.create_pages(document):
- # read the page into a layout object
- interpreter.process_page(page)
- layout = device.get_result()
- for obj in layout._objs:
- if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal):
- # print(obj.get_text())
- outTextList.append(obj.get_text())
-
- return outTextList
-
-
-def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt):
- import time, glob, os
- from bs4 import BeautifulSoup
- print('begin analysis on:', file_manifest)
- for index, fp in enumerate(file_manifest):
- if ".tex" in fp:
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
- if ".pdf" in fp.lower():
- file_content = readPdf(fp)
- file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk')
-
- prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
- i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```'
- i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}'
- chatbot.append((i_say_show_user, "[Local Message] waiting gpt response."))
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
- if not fast_debug:
- msg = '正常'
- # ** gpt request **
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
- inputs=i_say,
- inputs_show_user=i_say_show_user,
- llm_kwargs=llm_kwargs,
- chatbot=chatbot,
- history=[],
- sys_prompt="总结文章。"
- ) # 带超时倒计时
- chatbot[-1] = (i_say_show_user, gpt_say)
- history.append(i_say_show_user); history.append(gpt_say)
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
- if not fast_debug: time.sleep(2)
-
- all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)])
- i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。'
- chatbot.append((i_say, "[Local Message] waiting gpt response."))
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- if not fast_debug:
- msg = '正常'
- # ** gpt request **
- gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive(
- inputs=i_say,
- inputs_show_user=i_say,
- llm_kwargs=llm_kwargs,
- chatbot=chatbot,
- history=history,
- sys_prompt="总结文章。"
- ) # with timeout countdown
- chatbot[-1] = (i_say, gpt_say)
- history.append(i_say); history.append(gpt_say)
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
- res = write_results_to_file(history)
- chatbot.append(("完成了吗?", res))
- yield from update_ui(chatbot=chatbot, history=history, msg=msg) # refresh the UI
-
-
-
-@CatchException
-def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- history = [] # clear the history to avoid overflowing the model input
- import glob, os
-
- # Basic information: feature description and contributors
- chatbot.append([
- "函数插件功能?",
- "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"])
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
-
- # Try to import the dependencies; if any are missing, suggest how to install them
- try:
- import pdfminer, bs4
- except:
- report_execption(chatbot, history,
- a = f"解析项目: {txt}",
- b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \
- [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \
- # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \
- # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # refresh the UI
- return
- yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt)
-
diff --git a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/callback/__init__.py b/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/callback/__init__.py
deleted file mode 100644
index ccab5ae57a2d832309c4e72fcc51e5e2ad48a59b..0000000000000000000000000000000000000000
--- a/spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/callback/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from pytorch_lightning.callbacks import (
- ModelCheckpoint,
- LearningRateMonitor,
- EarlyStopping,
-)
-from src.utils.registry import Registry
-
-# from src.callback.visualizer_callbacks import VisualizerCallback
-
-CALLBACK_REGISTRY = Registry("CALLBACK")
-
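-# Register the stock PyTorch Lightning callbacks with the project's callback registry.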
-CALLBACK_REGISTRY.register(EarlyStopping)
-CALLBACK_REGISTRY.register(ModelCheckpoint)
-CALLBACK_REGISTRY.register(LearningRateMonitor)
-# TODO: add WandB visualizer callback
-# CALLBACK_REGISTRY.register(VisualizerCallback)
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README_sdxl.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README_sdxl.md
deleted file mode 100644
index 7dcde78f2cfdcd2876686f865c57be8b930dab41..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/dreambooth/README_sdxl.md
+++ /dev/null
@@ -1,189 +0,0 @@
-# DreamBooth training example for Stable Diffusion XL (SDXL)
-
-[DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text-to-image models like Stable Diffusion given just a few (3-5) images of a subject.
-
-The `train_dreambooth_lora_sdxl.py` script shows how to implement the training procedure and adapt it for [Stable Diffusion XL](https://huggingface.co/papers/2307.01952).
-
-> 💡 **Note**: For now, we only allow DreamBooth fine-tuning of the SDXL UNet via LoRA. LoRA is a parameter-efficient fine-tuning technique introduced in [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.
-
-## Running locally with PyTorch
-
-### Installing the dependencies
-
-Before running the scripts, make sure to install the library's training dependencies:
-
-**Important**
-
-To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the installation up to date, since we update the example scripts frequently and some examples have example-specific requirements. To do this, execute the following steps in a new virtual environment:
-
-```bash
-git clone https://github.com/huggingface/diffusers
-cd diffusers
-pip install -e .
-```
-
-Then cd into the `examples/dreambooth` folder and run:
-```bash
-pip install -r requirements_sdxl.txt
-```
-
-And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
-
-```bash
-accelerate config
-```
-
-Or for a default accelerate configuration without answering questions about your environment
-
-```bash
-accelerate config default
-```
-
-Or if your environment doesn't support an interactive shell (e.g., a notebook)
-
-```python
-from accelerate.utils import write_basic_config
-write_basic_config()
-```
-
-When running `accelerate config`, setting torch compile mode to True can yield dramatic speedups.
-
-### Dog toy example
-
-Now let's get our dataset. For this example we will use some dog images: https://huggingface.co/datasets/diffusers/dog-example.
-
-Let's first download it locally:
-
-```python
-from huggingface_hub import snapshot_download
-
-local_dir = "./dog"
-snapshot_download(
- "diffusers/dog-example",
- local_dir=local_dir, repo_type="dataset",
- ignore_patterns=".gitattributes",
-)
-```
-
-Since SDXL 0.9 weights are gated, we need to be authenticated to be able to use them. So, let's run:
-
-```bash
-huggingface-cli login
-```
-
-This will also allow us to push the trained LoRA parameters to the Hugging Face Hub platform.
-
-Now, we can launch training using:
-
-```bash
-export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
-export INSTANCE_DIR="dog"
-export OUTPUT_DIR="lora-trained-xl"
-
-accelerate launch train_dreambooth_lora_sdxl.py \
- --pretrained_model_name_or_path=$MODEL_NAME \
- --instance_data_dir=$INSTANCE_DIR \
- --output_dir=$OUTPUT_DIR \
- --mixed_precision="fp16" \
- --instance_prompt="a photo of sks dog" \
- --resolution=1024 \
- --train_batch_size=1 \
- --gradient_accumulation_steps=4 \
- --learning_rate=1e-4 \
- --report_to="wandb" \
- --lr_scheduler="constant" \
- --lr_warmup_steps=0 \
- --max_train_steps=500 \
- --validation_prompt="A photo of sks dog in a bucket" \
- --validation_epochs=25 \
- --seed="0" \
- --push_to_hub
-```
-
-To better track our training experiments, we're using the following flags in the command above:
-
-* `report_to="wandb"` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
-* `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
-
-Our experiments were conducted on a single 40GB A100 GPU.
-
-### Inference
-
-Once training is done, we can perform inference like so:
-
-```python
-from huggingface_hub.repocard import RepoCard
-from diffusers import DiffusionPipeline
-import torch
-
-lora_model_id = <"lora-sdxl-dreambooth-id">
-card = RepoCard.load(lora_model_id)
-base_model_id = card.data.to_dict()["base_model"]
-
-pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
-pipe = pipe.to("cuda")
-pipe.load_lora_weights(lora_model_id)
-image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
-image.save("sks_dog.png")
-```
-
-We can further refine the outputs with the [Refiner](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0):
-
-```python
-from huggingface_hub.repocard import RepoCard
-from diffusers import DiffusionPipeline, StableDiffusionXLImg2ImgPipeline
-import torch
-
-lora_model_id = <"lora-sdxl-dreambooth-id">
-card = RepoCard.load(lora_model_id)
-base_model_id = card.data.to_dict()["base_model"]
-
-# Load the base pipeline and load the LoRA parameters into it.
-pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16)
-pipe = pipe.to("cuda")
-pipe.load_lora_weights(lora_model_id)
-
-# Load the refiner.
-refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
- "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, use_safetensors=True, variant="fp16"
-)
-refiner.to("cuda")
-
-prompt = "A picture of a sks dog in a bucket"
-generator = torch.Generator("cuda").manual_seed(0)
-
-# Run inference.
-image = pipe(prompt=prompt, output_type="latent", generator=generator).images[0]
-image = refiner(prompt=prompt, image=image[None, :], generator=generator).images[0]
-image.save("refined_sks_dog.png")
-```
-
-Here's a side-by-side comparison of the pipeline outputs with and without the Refiner:
-
-| Without Refiner | With Refiner |
-|---|---|
-| *(image omitted)* | *(image omitted)* |
-
-### Training with text encoder(s)
-
-Alongside the UNet, LoRA fine-tuning of the text encoders is also supported. To do so, just specify `--train_text_encoder` while launching training. Please keep the following points in mind:
-
-* SDXL has two text encoders. So, we fine-tune both using LoRA.
-* When not fine-tuning the text encoders, we ALWAYS precompute the text embeddings to save memory; a rough sketch of this precomputation is shown below.
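-
-Below is a rough, illustrative sketch of what precomputing the prompt embeddings with the two SDXL text encoders could look like. The structure, the choice of the penultimate hidden state, and the pooled embedding taken from the second encoder follow common SDXL conventions and are assumptions here, not an excerpt from the training script:
-
-```python
-import torch
-from transformers import AutoTokenizer, CLIPTextModel, CLIPTextModelWithProjection
-
-base = "stabilityai/stable-diffusion-xl-base-1.0"
-tokenizers = [
-    AutoTokenizer.from_pretrained(base, subfolder="tokenizer"),
-    AutoTokenizer.from_pretrained(base, subfolder="tokenizer_2"),
-]
-text_encoders = [
-    CLIPTextModel.from_pretrained(base, subfolder="text_encoder"),
-    CLIPTextModelWithProjection.from_pretrained(base, subfolder="text_encoder_2"),
-]
-
-prompt = "a photo of sks dog"
-embeds = []
-with torch.no_grad():
-    for tokenizer, text_encoder in zip(tokenizers, text_encoders):
-        ids = tokenizer(
-            prompt,
-            padding="max_length",
-            max_length=tokenizer.model_max_length,
-            truncation=True,
-            return_tensors="pt",
-        ).input_ids
-        out = text_encoder(ids, output_hidden_states=True)
-        embeds.append(out.hidden_states[-2])  # penultimate hidden state, as is common for SDXL
-    pooled_prompt_embeds = out[0]  # pooled embedding from the second (projection) encoder
-prompt_embeds = torch.cat(embeds, dim=-1)  # shape (1, 77, 2048)
-```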
-
-### Specifying a better VAE
-
-SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument, `--pretrained_vae_model_name_or_path`, that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
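-
-If you train with such a VAE, you will likely want to load the same VAE at inference time. A minimal sketch, assuming the fp16-fix VAE linked above and the `lora-trained-xl` output directory from the training command earlier in this README:
-
-```python
-import torch
-from diffusers import AutoencoderKL, DiffusionPipeline
-
-# Load the more numerically stable VAE and hand it to the pipeline.
-vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-pipe = DiffusionPipeline.from_pretrained(
-    "stabilityai/stable-diffusion-xl-base-1.0", vae=vae, torch_dtype=torch.float16
-).to("cuda")
-pipe.load_lora_weights("lora-trained-xl")  # assumed local LoRA output directory
-
-image = pipe("A photo of sks dog in a bucket", num_inference_steps=25).images[0]
-image.save("sks_dog_fp16_vae.png")
-```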
-
-## Notes
-
-In our experiments, we found that SDXL yields good initial results without extensive hyperparameter tuning. For example, without fine-tuning the text encoders and without using prior-preservation, we observed decent results. We didn't explore further hyper-parameter tuning experiments, but we do encourage the community to explore this avenue further and share their results with us 🤗
-
-## Results
-
-You can explore the results from a couple of our internal experiments by checking out this link: [https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl](https://wandb.ai/sayakpaul/dreambooth-lora-sd-xl). Specifically, we used the same script with the exact same hyperparameters on the following datasets:
-
-* [Dogs](https://huggingface.co/datasets/diffusers/dog-example)
-* [Starbucks logo](https://huggingface.co/datasets/diffusers/starbucks-example)
-* [Mr. Potato Head](https://huggingface.co/datasets/diffusers/potato-head-example)
-* [Keramer face](https://huggingface.co/datasets/diffusers/keramer-face-example)
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py
deleted file mode 100644
index b7829f76ec12f946490618e0d03857777efdf219..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unidiffuser/modeling_uvit.py
+++ /dev/null
@@ -1,1196 +0,0 @@
-import math
-from typing import Optional, Union
-
-import torch
-from torch import nn
-
-from ...configuration_utils import ConfigMixin, register_to_config
-from ...models import ModelMixin
-from ...models.attention import AdaLayerNorm, FeedForward
-from ...models.attention_processor import Attention
-from ...models.embeddings import TimestepEmbedding, Timesteps, get_2d_sincos_pos_embed
-from ...models.transformer_2d import Transformer2DModelOutput
-from ...utils import logging
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-def _no_grad_trunc_normal_(tensor, mean, std, a, b):
- # Cut & paste from PyTorch official master until it's in a few official releases - RW
- # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
- def norm_cdf(x):
- # Computes standard normal cumulative distribution function
- return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
-
- if (mean < a - 2 * std) or (mean > b + 2 * std):
- logger.warning(
- "mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
- "The distribution of values may be incorrect."
- )
-
- with torch.no_grad():
- # Values are generated by using a truncated uniform distribution and
- # then using the inverse CDF for the normal distribution.
- # Get upper and lower cdf values
- l = norm_cdf((a - mean) / std)
- u = norm_cdf((b - mean) / std)
-
- # Uniformly fill tensor with values from [l, u], then translate to
- # [2l-1, 2u-1].
- tensor.uniform_(2 * l - 1, 2 * u - 1)
-
- # Use inverse cdf transform for normal distribution to get truncated
- # standard normal
- tensor.erfinv_()
-
- # Transform to proper mean, std
- tensor.mul_(std * math.sqrt(2.0))
- tensor.add_(mean)
-
- # Clamp to ensure it's in the proper range
- tensor.clamp_(min=a, max=b)
- return tensor
-
-
-def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
- # type: (torch.Tensor, float, float, float, float) -> torch.Tensor
- r"""Fills the input Tensor with values drawn from a truncated
- normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean},
- \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for
- generating the random values works best when :math:`a \leq \text{mean} \leq b`.
-
- Args:
- tensor: an n-dimensional `torch.Tensor`
- mean: the mean of the normal distribution
- std: the standard deviation of the normal distribution
- a: the minimum cutoff value
- b: the maximum cutoff value
- Examples:
- >>> w = torch.empty(3, 5)
- >>> nn.init.trunc_normal_(w)
- """
- return _no_grad_trunc_normal_(tensor, mean, std, a, b)
-
-
-class PatchEmbed(nn.Module):
- """2D Image to Patch Embedding"""
-
- def __init__(
- self,
- height=224,
- width=224,
- patch_size=16,
- in_channels=3,
- embed_dim=768,
- layer_norm=False,
- flatten=True,
- bias=True,
- use_pos_embed=True,
- ):
- super().__init__()
-
- num_patches = (height // patch_size) * (width // patch_size)
- self.flatten = flatten
- self.layer_norm = layer_norm
-
- self.proj = nn.Conv2d(
- in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
- )
- if layer_norm:
- self.norm = nn.LayerNorm(embed_dim, elementwise_affine=False, eps=1e-6)
- else:
- self.norm = None
-
- self.use_pos_embed = use_pos_embed
- if self.use_pos_embed:
- pos_embed = get_2d_sincos_pos_embed(embed_dim, int(num_patches**0.5))
- self.register_buffer("pos_embed", torch.from_numpy(pos_embed).float().unsqueeze(0), persistent=False)
-
- def forward(self, latent):
- latent = self.proj(latent)
- if self.flatten:
- latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC
- if self.layer_norm:
- latent = self.norm(latent)
- if self.use_pos_embed:
- return latent + self.pos_embed
- else:
- return latent
-
-
-class SkipBlock(nn.Module):
- def __init__(self, dim: int):
- super().__init__()
-
- self.skip_linear = nn.Linear(2 * dim, dim)
-
- # Use torch.nn.LayerNorm for now, following the original code
- self.norm = nn.LayerNorm(dim)
-
- def forward(self, x, skip):
- x = self.skip_linear(torch.cat([x, skip], dim=-1))
- x = self.norm(x)
-
- return x
-
-
-# Modified to support both pre-LayerNorm and post-LayerNorm configurations
-# Don't support AdaLayerNormZero for now
-# Modified from diffusers.models.attention.BasicTransformerBlock
-class UTransformerBlock(nn.Module):
- r"""
- A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations.
-
- Parameters:
- dim (`int`): The number of channels in the input and output.
- num_attention_heads (`int`): The number of heads to use for multi-head attention.
- attention_head_dim (`int`): The number of channels in each head.
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
- cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
- activation_fn (`str`, *optional*, defaults to `"geglu"`):
- Activation function to be used in feed-forward.
- num_embeds_ada_norm (:obj: `int`, *optional*):
- The number of diffusion steps used during training. See `Transformer2DModel`.
- attention_bias (:obj: `bool`, *optional*, defaults to `False`):
- Configure if the attentions should contain a bias parameter.
- only_cross_attention (`bool`, *optional*):
- Whether to use only cross-attention layers. In this case two cross attention layers are used.
- double_self_attention (`bool`, *optional*):
- Whether to use two self-attention layers. In this case no cross attention layers are used.
- upcast_attention (`bool`, *optional*):
- Whether to upcast the query and key to float32 when performing the attention calculation.
- norm_elementwise_affine (`bool`, *optional*):
- Whether to use learnable per-element affine parameters during layer normalization.
- norm_type (`str`, defaults to `"layer_norm"`):
- The layer norm implementation to use.
- pre_layer_norm (`bool`, *optional*):
- Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
- as opposed to after ("post-LayerNorm"). Note that `BasicTransformerBlock` uses pre-LayerNorm, e.g.
- `pre_layer_norm = True`.
- final_dropout (`bool`, *optional*):
- Whether to use a final Dropout layer after the feedforward network.
- """
-
- def __init__(
- self,
- dim: int,
- num_attention_heads: int,
- attention_head_dim: int,
- dropout=0.0,
- cross_attention_dim: Optional[int] = None,
- activation_fn: str = "geglu",
- num_embeds_ada_norm: Optional[int] = None,
- attention_bias: bool = False,
- only_cross_attention: bool = False,
- double_self_attention: bool = False,
- upcast_attention: bool = False,
- norm_elementwise_affine: bool = True,
- norm_type: str = "layer_norm",
- pre_layer_norm: bool = True,
- final_dropout: bool = False,
- ):
- super().__init__()
- self.only_cross_attention = only_cross_attention
-
- self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
-
- self.pre_layer_norm = pre_layer_norm
-
- if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
- raise ValueError(
- f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
- f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
- )
-
- # 1. Self-Attn
- self.attn1 = Attention(
- query_dim=dim,
- heads=num_attention_heads,
- dim_head=attention_head_dim,
- dropout=dropout,
- bias=attention_bias,
- cross_attention_dim=cross_attention_dim if only_cross_attention else None,
- upcast_attention=upcast_attention,
- )
-
- # 2. Cross-Attn
- if cross_attention_dim is not None or double_self_attention:
- self.attn2 = Attention(
- query_dim=dim,
- cross_attention_dim=cross_attention_dim if not double_self_attention else None,
- heads=num_attention_heads,
- dim_head=attention_head_dim,
- dropout=dropout,
- bias=attention_bias,
- upcast_attention=upcast_attention,
- ) # is self-attn if encoder_hidden_states is none
- else:
- self.attn2 = None
-
- if self.use_ada_layer_norm:
- self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
- else:
- self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
-
- if cross_attention_dim is not None or double_self_attention:
- # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
- # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
- # the second cross attention block.
- self.norm2 = (
- AdaLayerNorm(dim, num_embeds_ada_norm)
- if self.use_ada_layer_norm
- else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
- )
- else:
- self.norm2 = None
-
- # 3. Feed-forward
- self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
- self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- timestep=None,
- cross_attention_kwargs=None,
- class_labels=None,
- ):
- # Pre-LayerNorm
- if self.pre_layer_norm:
- if self.use_ada_layer_norm:
- norm_hidden_states = self.norm1(hidden_states, timestep)
- else:
- norm_hidden_states = self.norm1(hidden_states)
- else:
- norm_hidden_states = hidden_states
-
- # 1. Self-Attention
- cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
- attn_output = self.attn1(
- norm_hidden_states,
- encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
- attention_mask=attention_mask,
- **cross_attention_kwargs,
- )
-
- # Post-LayerNorm
- if not self.pre_layer_norm:
- if self.use_ada_layer_norm:
- attn_output = self.norm1(attn_output, timestep)
- else:
- attn_output = self.norm1(attn_output)
-
- hidden_states = attn_output + hidden_states
-
- if self.attn2 is not None:
- # Pre-LayerNorm
- if self.pre_layer_norm:
- norm_hidden_states = (
- self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
- )
- else:
- norm_hidden_states = hidden_states
- # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly
- # prepare attention mask here
-
- # 2. Cross-Attention
- attn_output = self.attn2(
- norm_hidden_states,
- encoder_hidden_states=encoder_hidden_states,
- attention_mask=encoder_attention_mask,
- **cross_attention_kwargs,
- )
-
- # Post-LayerNorm
- if not self.pre_layer_norm:
- attn_output = self.norm2(attn_output, timestep) if self.use_ada_layer_norm else self.norm2(attn_output)
-
- hidden_states = attn_output + hidden_states
-
- # 3. Feed-forward
- # Pre-LayerNorm
- if self.pre_layer_norm:
- norm_hidden_states = self.norm3(hidden_states)
- else:
- norm_hidden_states = hidden_states
-
- ff_output = self.ff(norm_hidden_states)
-
- # Post-LayerNorm
- if not self.pre_layer_norm:
- ff_output = self.norm3(ff_output)
-
- hidden_states = ff_output + hidden_states
-
- return hidden_states
-
-
-# Like UTransformerBlock except with LayerNorms on the residual backbone of the block
-# Modified from diffusers.models.attention.BasicTransformerBlock
-class UniDiffuserBlock(nn.Module):
- r"""
- A modification of BasicTransformerBlock which supports pre-LayerNorm and post-LayerNorm configurations and puts the
- LayerNorms on the residual backbone of the block. This matches the transformer block in the [original UniDiffuser
- implementation](https://github.com/thu-ml/unidiffuser/blob/main/libs/uvit_multi_post_ln_v1.py#L104).
-
- Parameters:
- dim (`int`): The number of channels in the input and output.
- num_attention_heads (`int`): The number of heads to use for multi-head attention.
- attention_head_dim (`int`): The number of channels in each head.
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
- cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
- activation_fn (`str`, *optional*, defaults to `"geglu"`):
- Activation function to be used in feed-forward.
- num_embeds_ada_norm (:obj: `int`, *optional*):
- The number of diffusion steps used during training. See `Transformer2DModel`.
- attention_bias (:obj: `bool`, *optional*, defaults to `False`):
- Configure if the attentions should contain a bias parameter.
- only_cross_attention (`bool`, *optional*):
- Whether to use only cross-attention layers. In this case two cross attention layers are used.
- double_self_attention (`bool`, *optional*):
- Whether to use two self-attention layers. In this case no cross attention layers are used.
- upcast_attention (`bool`, *optional*):
- Whether to upcast the query and key to float32 when performing the attention calculation.
- norm_elementwise_affine (`bool`, *optional*):
- Whether to use learnable per-element affine parameters during layer normalization.
- norm_type (`str`, defaults to `"layer_norm"`):
- The layer norm implementation to use.
- pre_layer_norm (`bool`, *optional*):
- Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
- as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
- (`pre_layer_norm = False`).
- final_dropout (`bool`, *optional*):
- Whether to use a final Dropout layer after the feedforward network.
- """
-
- def __init__(
- self,
- dim: int,
- num_attention_heads: int,
- attention_head_dim: int,
- dropout=0.0,
- cross_attention_dim: Optional[int] = None,
- activation_fn: str = "geglu",
- num_embeds_ada_norm: Optional[int] = None,
- attention_bias: bool = False,
- only_cross_attention: bool = False,
- double_self_attention: bool = False,
- upcast_attention: bool = False,
- norm_elementwise_affine: bool = True,
- norm_type: str = "layer_norm",
- pre_layer_norm: bool = False,
- final_dropout: bool = True,
- ):
- super().__init__()
- self.only_cross_attention = only_cross_attention
-
- self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
-
- self.pre_layer_norm = pre_layer_norm
-
- if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
- raise ValueError(
- f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
- f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
- )
-
- # 1. Self-Attn
- self.attn1 = Attention(
- query_dim=dim,
- heads=num_attention_heads,
- dim_head=attention_head_dim,
- dropout=dropout,
- bias=attention_bias,
- cross_attention_dim=cross_attention_dim if only_cross_attention else None,
- upcast_attention=upcast_attention,
- )
-
- # 2. Cross-Attn
- if cross_attention_dim is not None or double_self_attention:
- self.attn2 = Attention(
- query_dim=dim,
- cross_attention_dim=cross_attention_dim if not double_self_attention else None,
- heads=num_attention_heads,
- dim_head=attention_head_dim,
- dropout=dropout,
- bias=attention_bias,
- upcast_attention=upcast_attention,
- ) # is self-attn if encoder_hidden_states is none
- else:
- self.attn2 = None
-
- if self.use_ada_layer_norm:
- self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
- else:
- self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
-
- if cross_attention_dim is not None or double_self_attention:
- # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
- # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
- # the second cross attention block.
- self.norm2 = (
- AdaLayerNorm(dim, num_embeds_ada_norm)
- if self.use_ada_layer_norm
- else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
- )
- else:
- self.norm2 = None
-
- # 3. Feed-forward
- self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
- self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
-
- def forward(
- self,
- hidden_states,
- attention_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- timestep=None,
- cross_attention_kwargs=None,
- class_labels=None,
- ):
- # Following the diffusers transformer block implementation, put the LayerNorm on the
- # residual backbone
- # Pre-LayerNorm
- if self.pre_layer_norm:
- if self.use_ada_layer_norm:
- hidden_states = self.norm1(hidden_states, timestep)
- else:
- hidden_states = self.norm1(hidden_states)
-
- # 1. Self-Attention
- cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
- attn_output = self.attn1(
- hidden_states,
- encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
- attention_mask=attention_mask,
- **cross_attention_kwargs,
- )
-
- hidden_states = attn_output + hidden_states
-
- # Following the diffusers transformer block implementation, put the LayerNorm on the
- # residual backbone
- # Post-LayerNorm
- if not self.pre_layer_norm:
- if self.use_ada_layer_norm:
- hidden_states = self.norm1(hidden_states, timestep)
- else:
- hidden_states = self.norm1(hidden_states)
-
- if self.attn2 is not None:
- # Pre-LayerNorm
- if self.pre_layer_norm:
- hidden_states = (
- self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
- )
- # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly
- # prepare attention mask here
-
- # 2. Cross-Attention
- attn_output = self.attn2(
- hidden_states,
- encoder_hidden_states=encoder_hidden_states,
- attention_mask=encoder_attention_mask,
- **cross_attention_kwargs,
- )
-
- hidden_states = attn_output + hidden_states
-
- # Post-LayerNorm
- if not self.pre_layer_norm:
- hidden_states = (
- self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
- )
-
- # 3. Feed-forward
- # Pre-LayerNorm
- if self.pre_layer_norm:
- hidden_states = self.norm3(hidden_states)
-
- ff_output = self.ff(hidden_states)
-
- hidden_states = ff_output + hidden_states
-
- # Post-LayerNorm
- if not self.pre_layer_norm:
- hidden_states = self.norm3(hidden_states)
-
- return hidden_states
-
-
-# Modified from diffusers.models.transformer_2d.Transformer2DModel
-# Modify the transformer block structure to be U-Net like following U-ViT
-# Only supports patch-style input and torch.nn.LayerNorm currently
-# https://github.com/baofff/U-ViT
-class UTransformer2DModel(ModelMixin, ConfigMixin):
- """
- Transformer model based on the [U-ViT](https://github.com/baofff/U-ViT) architecture for image-like data. Compared
- to [`Transformer2DModel`], this model has skip connections between transformer blocks in a "U"-shaped fashion,
- similar to a U-Net. Supports only continuous (actual embeddings) inputs, which are embedded via a [`PatchEmbed`]
- layer and then reshaped to (b, t, d).
-
- Parameters:
- num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
- attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
- in_channels (`int`, *optional*):
- Pass if the input is continuous. The number of channels in the input.
- out_channels (`int`, *optional*):
- The number of output channels; if `None`, defaults to `in_channels`.
- num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
- norm_num_groups (`int`, *optional*, defaults to `32`):
- The number of groups to use when performing Group Normalization.
- cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
- attention_bias (`bool`, *optional*):
- Configure if the TransformerBlocks' attention should contain a bias parameter.
- sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
- Note that this is fixed at training time as it is used for learning a number of position embeddings. See
- `ImagePositionalEmbeddings`.
- num_vector_embeds (`int`, *optional*):
- Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
- Includes the class for the masked latent pixel.
- patch_size (`int`, *optional*, defaults to 2):
- The patch size to use in the patch embedding.
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
- num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
- The number of diffusion steps used during training. Note that this is fixed at training time as it is used
- to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
- up to, but not more than, `num_embeds_ada_norm` steps.
- use_linear_projection (int, *optional*): TODO: Not used
- only_cross_attention (`bool`, *optional*):
- Whether to use only cross-attention layers. In this case two cross attention layers are used in each
- transformer block.
- upcast_attention (`bool`, *optional*):
- Whether to upcast the query and key to float32 when performing the attention calculation.
- norm_type (`str`, *optional*, defaults to `"layer_norm"`):
- The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`.
- block_type (`str`, *optional*, defaults to `"unidiffuser"`):
- The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual
- backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard
- behavior in `diffusers`.)
- pre_layer_norm (`bool`, *optional*):
- Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
- as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
- (`pre_layer_norm = False`).
- norm_elementwise_affine (`bool`, *optional*):
- Whether to use learnable per-element affine parameters during layer normalization.
- use_patch_pos_embed (`bool`, *optional*):
- Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`).
- final_dropout (`bool`, *optional*):
- Whether to use a final Dropout layer after the feedforward network.
- """
-
- @register_to_config
- def __init__(
- self,
- num_attention_heads: int = 16,
- attention_head_dim: int = 88,
- in_channels: Optional[int] = None,
- out_channels: Optional[int] = None,
- num_layers: int = 1,
- dropout: float = 0.0,
- norm_num_groups: int = 32,
- cross_attention_dim: Optional[int] = None,
- attention_bias: bool = False,
- sample_size: Optional[int] = None,
- num_vector_embeds: Optional[int] = None,
- patch_size: Optional[int] = 2,
- activation_fn: str = "geglu",
- num_embeds_ada_norm: Optional[int] = None,
- use_linear_projection: bool = False,
- only_cross_attention: bool = False,
- upcast_attention: bool = False,
- norm_type: str = "layer_norm",
- block_type: str = "unidiffuser",
- pre_layer_norm: bool = False,
- norm_elementwise_affine: bool = True,
- use_patch_pos_embed=False,
- ff_final_dropout: bool = False,
- ):
- super().__init__()
- self.use_linear_projection = use_linear_projection
- self.num_attention_heads = num_attention_heads
- self.attention_head_dim = attention_head_dim
- inner_dim = num_attention_heads * attention_head_dim
-
- # 1. Input
- # Only support patch input of shape (batch_size, num_channels, height, width) for now
- assert in_channels is not None and patch_size is not None, "Patch input requires in_channels and patch_size."
-
- assert sample_size is not None, "UTransformer2DModel over patched input must provide sample_size"
-
- # 2. Define input layers
- self.height = sample_size
- self.width = sample_size
-
- self.patch_size = patch_size
- self.pos_embed = PatchEmbed(
- height=sample_size,
- width=sample_size,
- patch_size=patch_size,
- in_channels=in_channels,
- embed_dim=inner_dim,
- use_pos_embed=use_patch_pos_embed,
- )
-
- # 3. Define transformers blocks
- # Modify this to have in_blocks ("downsample" blocks, even though we don't actually downsample), a mid_block,
- # and out_blocks ("upsample" blocks). Like a U-Net, there are skip connections from in_blocks to out_blocks in
- # a "U"-shaped fashion (e.g. first in_block to last out_block, etc.).
- # Quick hack to make the transformer block type configurable
- if block_type == "unidiffuser":
- block_cls = UniDiffuserBlock
- else:
- block_cls = UTransformerBlock
- self.transformer_in_blocks = nn.ModuleList(
- [
- block_cls(
- inner_dim,
- num_attention_heads,
- attention_head_dim,
- dropout=dropout,
- cross_attention_dim=cross_attention_dim,
- activation_fn=activation_fn,
- num_embeds_ada_norm=num_embeds_ada_norm,
- attention_bias=attention_bias,
- only_cross_attention=only_cross_attention,
- upcast_attention=upcast_attention,
- norm_type=norm_type,
- pre_layer_norm=pre_layer_norm,
- norm_elementwise_affine=norm_elementwise_affine,
- final_dropout=ff_final_dropout,
- )
- for d in range(num_layers // 2)
- ]
- )
-
- self.transformer_mid_block = block_cls(
- inner_dim,
- num_attention_heads,
- attention_head_dim,
- dropout=dropout,
- cross_attention_dim=cross_attention_dim,
- activation_fn=activation_fn,
- num_embeds_ada_norm=num_embeds_ada_norm,
- attention_bias=attention_bias,
- only_cross_attention=only_cross_attention,
- upcast_attention=upcast_attention,
- norm_type=norm_type,
- pre_layer_norm=pre_layer_norm,
- norm_elementwise_affine=norm_elementwise_affine,
- final_dropout=ff_final_dropout,
- )
-
- # For each skip connection, we use a SkipBlock (concatenation + Linear + LayerNorm) to process the inputs
- # before each transformer out_block.
- self.transformer_out_blocks = nn.ModuleList(
- [
- nn.ModuleDict(
- {
- "skip": SkipBlock(
- inner_dim,
- ),
- "block": block_cls(
- inner_dim,
- num_attention_heads,
- attention_head_dim,
- dropout=dropout,
- cross_attention_dim=cross_attention_dim,
- activation_fn=activation_fn,
- num_embeds_ada_norm=num_embeds_ada_norm,
- attention_bias=attention_bias,
- only_cross_attention=only_cross_attention,
- upcast_attention=upcast_attention,
- norm_type=norm_type,
- pre_layer_norm=pre_layer_norm,
- norm_elementwise_affine=norm_elementwise_affine,
- final_dropout=ff_final_dropout,
- ),
- }
- )
- for d in range(num_layers // 2)
- ]
- )
-
- # 4. Define output layers
- self.out_channels = in_channels if out_channels is None else out_channels
-
- # Following the UniDiffuser U-ViT implementation, we process the transformer output with
- # a LayerNorm layer with per-element affine params
- self.norm_out = nn.LayerNorm(inner_dim)
-
- def forward(
- self,
- hidden_states,
- encoder_hidden_states=None,
- timestep=None,
- class_labels=None,
- cross_attention_kwargs=None,
- return_dict: bool = True,
- hidden_states_is_embedding: bool = False,
- unpatchify: bool = True,
- ):
- """
- Args:
- hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.
- When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input
- hidden_states
- encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
- Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
- self-attention.
- timestep ( `torch.long`, *optional*):
- Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.
- class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
- Optional class labels to be applied as an embedding in AdaLayerZeroNorm. Used to indicate class labels
- conditioning.
- cross_attention_kwargs (*optional*):
- Keyword arguments to supply to the cross attention layers, if used.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
- hidden_states_is_embedding (`bool`, *optional*, defaults to `False`):
- Whether or not hidden_states is an embedding directly usable by the transformer. In this case we will
- ignore input handling (e.g. continuous, vectorized, etc.) and directly feed hidden_states into the
- transformer blocks.
- unpatchify (`bool`, *optional*, defaults to `True`):
- Whether to unpatchify the transformer output.
-
- Returns:
- [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:
- [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When
- returning a tuple, the first element is the sample tensor.
- """
- # 0. Check inputs
-
- if not unpatchify and return_dict:
- raise ValueError(
- f"Cannot both define `unpatchify`: {unpatchify} and `return_dict`: {return_dict} since when"
- f" `unpatchify` is {unpatchify} the returned output is of shape (batch_size, seq_len, hidden_dim)"
- " rather than (batch_size, num_channels, height, width)."
- )
-
- # 1. Input
- if not hidden_states_is_embedding:
- hidden_states = self.pos_embed(hidden_states)
-
- # 2. Blocks
-
- # In ("downsample") blocks
- skips = []
- for in_block in self.transformer_in_blocks:
- hidden_states = in_block(
- hidden_states,
- encoder_hidden_states=encoder_hidden_states,
- timestep=timestep,
- cross_attention_kwargs=cross_attention_kwargs,
- class_labels=class_labels,
- )
- skips.append(hidden_states)
-
- # Mid block
- hidden_states = self.transformer_mid_block(hidden_states)
-
- # Out ("upsample") blocks
- for out_block in self.transformer_out_blocks:
- hidden_states = out_block["skip"](hidden_states, skips.pop())
- hidden_states = out_block["block"](
- hidden_states,
- encoder_hidden_states=encoder_hidden_states,
- timestep=timestep,
- cross_attention_kwargs=cross_attention_kwargs,
- class_labels=class_labels,
- )
-
- # 3. Output
- # Don't support AdaLayerNorm for now, so no conditioning/scale/shift logic
- hidden_states = self.norm_out(hidden_states)
- # hidden_states = self.proj_out(hidden_states)
-
- if unpatchify:
- # unpatchify
- height = width = int(hidden_states.shape[1] ** 0.5)
- hidden_states = hidden_states.reshape(
- shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
- )
- hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
- output = hidden_states.reshape(
- shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
- )
- else:
- output = hidden_states
-
- if not return_dict:
- return (output,)
-
- return Transformer2DModelOutput(sample=output)
-
-
-class UniDiffuserModel(ModelMixin, ConfigMixin):
- """
- Transformer model for an image-text [UniDiffuser](https://arxiv.org/pdf/2303.06555.pdf) model. This is a
- modification of [`UTransformer2DModel`] with input and output heads for the VAE-embedded latent image, the
- CLIP-embedded image, and the CLIP-embedded prompt (see paper for more details).
-
- Parameters:
- text_dim (`int`): The hidden dimension of the CLIP text model used to embed images.
- clip_img_dim (`int`): The hidden dimension of the CLIP vision model used to embed prompts.
- num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
- attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
- in_channels (`int`, *optional*):
- Pass if the input is continuous. The number of channels in the input.
- out_channels (`int`, *optional*):
- The number of output channels; if `None`, defaults to `in_channels`.
- num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
- norm_num_groups (`int`, *optional*, defaults to `32`):
- The number of groups to use when performing Group Normalization.
- cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.
- attention_bias (`bool`, *optional*):
- Configure if the TransformerBlocks' attention should contain a bias parameter.
- sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.
- Note that this is fixed at training time as it is used for learning a number of position embeddings. See
- `ImagePositionalEmbeddings`.
- num_vector_embeds (`int`, *optional*):
- Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.
- Includes the class for the masked latent pixel.
- patch_size (`int`, *optional*, defaults to 2):
- The patch size to use in the patch embedding.
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
- num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.
- The number of diffusion steps used during training. Note that this is fixed at training time as it is used
- to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for
- up to, but not more than, `num_embeds_ada_norm` steps.
- use_linear_projection (int, *optional*): TODO: Not used
- only_cross_attention (`bool`, *optional*):
- Whether to use only cross-attention layers. In this case two cross attention layers are used in each
- transformer block.
- upcast_attention (`bool`, *optional*):
- Whether to upcast the query and key to float32 when performing the attention calculation.
- norm_type (`str`, *optional*, defaults to `"layer_norm"`):
- The Layer Normalization implementation to use. Defaults to `torch.nn.LayerNorm`.
- block_type (`str`, *optional*, defaults to `"unidiffuser"`):
- The transformer block implementation to use. If `"unidiffuser"`, has the LayerNorms on the residual
- backbone of each transformer block; otherwise has them in the attention/feedforward branches (the standard
- behavior in `diffusers`.)
- pre_layer_norm (`bool`, *optional*):
- Whether to perform layer normalization before the attention and feedforward operations ("pre-LayerNorm"),
- as opposed to after ("post-LayerNorm"). The original UniDiffuser implementation is post-LayerNorm
- (`pre_layer_norm = False`).
- norm_elementwise_affine (`bool`, *optional*):
- Whether to use learnable per-element affine parameters during layer normalization.
- use_patch_pos_embed (`bool`, *optional*):
- Whether to use position embeddings inside the patch embedding layer (`PatchEmbed`).
- ff_final_dropout (`bool`, *optional*):
- Whether to use a final Dropout layer after the feedforward network.
- use_data_type_embedding (`bool`, *optional*):
- Whether to use a data type embedding. This is only relevant for UniDiffuser-v1 style models; UniDiffuser-v1
- is further trained from UniDiffuser-v0 on non-publicly-available data and accepts a `data_type`
- argument, which can either be `1` to use the weights trained on that non-public data or `0`
- otherwise. This argument is subsequently embedded by the data type embedding, if used.
- """
-
- @register_to_config
- def __init__(
- self,
- text_dim: int = 768,
- clip_img_dim: int = 512,
- num_text_tokens: int = 77,
- num_attention_heads: int = 16,
- attention_head_dim: int = 88,
- in_channels: Optional[int] = None,
- out_channels: Optional[int] = None,
- num_layers: int = 1,
- dropout: float = 0.0,
- norm_num_groups: int = 32,
- cross_attention_dim: Optional[int] = None,
- attention_bias: bool = False,
- sample_size: Optional[int] = None,
- num_vector_embeds: Optional[int] = None,
- patch_size: Optional[int] = None,
- activation_fn: str = "geglu",
- num_embeds_ada_norm: Optional[int] = None,
- use_linear_projection: bool = False,
- only_cross_attention: bool = False,
- upcast_attention: bool = False,
- norm_type: str = "layer_norm",
- block_type: str = "unidiffuser",
- pre_layer_norm: bool = False,
- use_timestep_embedding=False,
- norm_elementwise_affine: bool = True,
- use_patch_pos_embed=False,
- ff_final_dropout: bool = True,
- use_data_type_embedding: bool = False,
- ):
- super().__init__()
-
- # 0. Handle dimensions
- self.inner_dim = num_attention_heads * attention_head_dim
-
- assert sample_size is not None, "UniDiffuserModel over patched input must provide sample_size"
- self.sample_size = sample_size
- self.in_channels = in_channels
- self.out_channels = in_channels if out_channels is None else out_channels
-
- self.patch_size = patch_size
- # Assume image is square...
- self.num_patches = (self.sample_size // patch_size) * (self.sample_size // patch_size)
-
- # 1. Define input layers
- # 1.1 Input layers for text and image input
- # For now, only support patch input for VAE latent image input
- self.vae_img_in = PatchEmbed(
- height=sample_size,
- width=sample_size,
- patch_size=patch_size,
- in_channels=in_channels,
- embed_dim=self.inner_dim,
- use_pos_embed=use_patch_pos_embed,
- )
- self.clip_img_in = nn.Linear(clip_img_dim, self.inner_dim)
- self.text_in = nn.Linear(text_dim, self.inner_dim)
-
- # 1.2. Timestep embeddings for t_img, t_text
- self.timestep_img_proj = Timesteps(
- self.inner_dim,
- flip_sin_to_cos=True,
- downscale_freq_shift=0,
- )
- self.timestep_img_embed = (
- TimestepEmbedding(
- self.inner_dim,
- 4 * self.inner_dim,
- out_dim=self.inner_dim,
- )
- if use_timestep_embedding
- else nn.Identity()
- )
-
- self.timestep_text_proj = Timesteps(
- self.inner_dim,
- flip_sin_to_cos=True,
- downscale_freq_shift=0,
- )
- self.timestep_text_embed = (
- TimestepEmbedding(
- self.inner_dim,
- 4 * self.inner_dim,
- out_dim=self.inner_dim,
- )
- if use_timestep_embedding
- else nn.Identity()
- )
-
- # 1.3. Positional embedding
- self.num_text_tokens = num_text_tokens
- self.num_tokens = 1 + 1 + num_text_tokens + 1 + self.num_patches
- self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, self.inner_dim))
- self.pos_embed_drop = nn.Dropout(p=dropout)
- trunc_normal_(self.pos_embed, std=0.02)
-
- # 1.4. Handle data type token embeddings for UniDiffuser-V1, if necessary
- self.use_data_type_embedding = use_data_type_embedding
- if self.use_data_type_embedding:
- self.data_type_token_embedding = nn.Embedding(2, self.inner_dim)
- self.data_type_pos_embed_token = nn.Parameter(torch.zeros(1, 1, self.inner_dim))
-
- # 2. Define transformer blocks
- self.transformer = UTransformer2DModel(
- num_attention_heads=num_attention_heads,
- attention_head_dim=attention_head_dim,
- in_channels=in_channels,
- out_channels=out_channels,
- num_layers=num_layers,
- dropout=dropout,
- norm_num_groups=norm_num_groups,
- cross_attention_dim=cross_attention_dim,
- attention_bias=attention_bias,
- sample_size=sample_size,
- num_vector_embeds=num_vector_embeds,
- patch_size=patch_size,
- activation_fn=activation_fn,
- num_embeds_ada_norm=num_embeds_ada_norm,
- use_linear_projection=use_linear_projection,
- only_cross_attention=only_cross_attention,
- upcast_attention=upcast_attention,
- norm_type=norm_type,
- block_type=block_type,
- pre_layer_norm=pre_layer_norm,
- norm_elementwise_affine=norm_elementwise_affine,
- use_patch_pos_embed=use_patch_pos_embed,
- ff_final_dropout=ff_final_dropout,
- )
-
- # 3. Define output layers
- patch_dim = (patch_size**2) * out_channels
- self.vae_img_out = nn.Linear(self.inner_dim, patch_dim)
- self.clip_img_out = nn.Linear(self.inner_dim, clip_img_dim)
- self.text_out = nn.Linear(self.inner_dim, text_dim)
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {"pos_embed"}
-
- def forward(
- self,
- latent_image_embeds: torch.FloatTensor,
- image_embeds: torch.FloatTensor,
- prompt_embeds: torch.FloatTensor,
- timestep_img: Union[torch.Tensor, float, int],
- timestep_text: Union[torch.Tensor, float, int],
- data_type: Optional[Union[torch.Tensor, float, int]] = 1,
- encoder_hidden_states=None,
- cross_attention_kwargs=None,
- ):
- """
- Args:
- latent_image_embeds (`torch.FloatTensor` of shape `(batch size, latent channels, height, width)`):
- Latent image representation from the VAE encoder.
- image_embeds (`torch.FloatTensor` of shape `(batch size, 1, clip_img_dim)`):
- CLIP-embedded image representation (unsqueezed in the first dimension).
- prompt_embeds (`torch.FloatTensor` of shape `(batch size, seq_len, text_dim)`):
- CLIP-embedded text representation.
- timestep_img (`torch.long` or `float` or `int`):
- Current denoising step for the image.
- timestep_text (`torch.long` or `float` or `int`):
- Current denoising step for the text.
- data_type: (`torch.int` or `float` or `int`, *optional*, defaults to `1`):
- Only used in UniDiffuser-v1-style models. Can be either `1`, to use weights trained on nonpublic data,
- or `0` otherwise.
- encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
- Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
- self-attention.
- cross_attention_kwargs (*optional*):
- Keyword arguments to supply to the cross attention layers, if used.
-
-
- Returns:
- `tuple`: Returns relevant parts of the model's noise prediction: the first element of the tuple is the VAE
- image embedding, the second element is the CLIP image embedding, and the third element is the CLIP text
- embedding.
- """
- batch_size = latent_image_embeds.shape[0]
-
- # 1. Input
- # 1.1. Map inputs to shape (B, N, inner_dim)
- vae_hidden_states = self.vae_img_in(latent_image_embeds)
- clip_hidden_states = self.clip_img_in(image_embeds)
- text_hidden_states = self.text_in(prompt_embeds)
-
- num_text_tokens, num_img_tokens = text_hidden_states.size(1), vae_hidden_states.size(1)
-
- # 1.2. Encode image timesteps to single token (B, 1, inner_dim)
- if not torch.is_tensor(timestep_img):
- timestep_img = torch.tensor([timestep_img], dtype=torch.long, device=vae_hidden_states.device)
-
- # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
- timestep_img = timestep_img * torch.ones(batch_size, dtype=timestep_img.dtype, device=timestep_img.device)
-
- timestep_img_token = self.timestep_img_proj(timestep_img)
- # t_img_token does not contain any weights and will always return f32 tensors
- # but time_embedding might be fp16, so we need to cast here.
- timestep_img_token = timestep_img_token.to(dtype=self.dtype)
- timestep_img_token = self.timestep_img_embed(timestep_img_token)
- timestep_img_token = timestep_img_token.unsqueeze(dim=1)
-
- # 1.3. Encode text timesteps to single token (B, 1, inner_dim)
- if not torch.is_tensor(timestep_text):
- timestep_text = torch.tensor([timestep_text], dtype=torch.long, device=vae_hidden_states.device)
-
- # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
- timestep_text = timestep_text * torch.ones(batch_size, dtype=timestep_text.dtype, device=timestep_text.device)
-
- timestep_text_token = self.timestep_text_proj(timestep_text)
- # t_text_token does not contain any weights and will always return f32 tensors
- # but time_embedding might be fp16, so we need to cast here.
- timestep_text_token = timestep_text_token.to(dtype=self.dtype)
- timestep_text_token = self.timestep_text_embed(timestep_text_token)
- timestep_text_token = timestep_text_token.unsqueeze(dim=1)
-
- # 1.4. Concatenate all of the embeddings together.
- if self.use_data_type_embedding:
- assert data_type is not None, "data_type must be supplied if the model uses a data type embedding"
- if not torch.is_tensor(data_type):
- data_type = torch.tensor([data_type], dtype=torch.int, device=vae_hidden_states.device)
-
- # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
- data_type = data_type * torch.ones(batch_size, dtype=data_type.dtype, device=data_type.device)
-
- data_type_token = self.data_type_token_embedding(data_type).unsqueeze(dim=1)
- hidden_states = torch.cat(
- [
- timestep_img_token,
- timestep_text_token,
- data_type_token,
- text_hidden_states,
- clip_hidden_states,
- vae_hidden_states,
- ],
- dim=1,
- )
- else:
- hidden_states = torch.cat(
- [timestep_img_token, timestep_text_token, text_hidden_states, clip_hidden_states, vae_hidden_states],
- dim=1,
- )
-
- # 1.5. Prepare the positional embeddings and add to hidden states
- # Note: I think img_vae should always have the proper shape, so there's no need to interpolate
- # the position embeddings.
- if self.use_data_type_embedding:
- pos_embed = torch.cat(
- [self.pos_embed[:, : 1 + 1, :], self.data_type_pos_embed_token, self.pos_embed[:, 1 + 1 :, :]], dim=1
- )
- else:
- pos_embed = self.pos_embed
- hidden_states = hidden_states + pos_embed
- hidden_states = self.pos_embed_drop(hidden_states)
-
- # 2. Blocks
- hidden_states = self.transformer(
- hidden_states,
- encoder_hidden_states=encoder_hidden_states,
- timestep=None,
- class_labels=None,
- cross_attention_kwargs=cross_attention_kwargs,
- return_dict=False,
- hidden_states_is_embedding=True,
- unpatchify=False,
- )[0]
-
- # 3. Output
- # Split out the predicted noise representation.
- if self.use_data_type_embedding:
- (
- t_img_token_out,
- t_text_token_out,
- data_type_token_out,
- text_out,
- img_clip_out,
- img_vae_out,
- ) = hidden_states.split((1, 1, 1, num_text_tokens, 1, num_img_tokens), dim=1)
- else:
- t_img_token_out, t_text_token_out, text_out, img_clip_out, img_vae_out = hidden_states.split(
- (1, 1, num_text_tokens, 1, num_img_tokens), dim=1
- )
-
- img_vae_out = self.vae_img_out(img_vae_out)
-
- # unpatchify
- height = width = int(img_vae_out.shape[1] ** 0.5)
- img_vae_out = img_vae_out.reshape(
- shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
- )
- img_vae_out = torch.einsum("nhwpqc->nchpwq", img_vae_out)
- img_vae_out = img_vae_out.reshape(
- shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
- )
-
- img_clip_out = self.clip_img_out(img_clip_out)
-
- text_out = self.text_out(text_out)
-
- return img_vae_out, img_clip_out, text_out
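The unpatchify step above folds each token's patch_size x patch_size x out_channels values back into an image grid via a reshape and an einsum. A minimal standalone sketch of the same rearrangement, using small made-up sizes rather than the deleted model's real configuration:

import torch

batch, patch_size, out_channels = 2, 2, 4
height = width = 8  # number of patches per side (hypothetical)
num_tokens = height * width

# (B, num_tokens, patch_size * patch_size * out_channels), as produced by vae_img_out above
tokens = torch.randn(batch, num_tokens, patch_size * patch_size * out_channels)

x = tokens.reshape(batch, height, width, patch_size, patch_size, out_channels)
x = torch.einsum("nhwpqc->nchpwq", x)
img = x.reshape(batch, out_channels, height * patch_size, width * patch_size)
print(img.shape)  # torch.Size([2, 4, 16, 16])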
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/get_modified_files.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/get_modified_files.py
deleted file mode 100644
index 650c61ccb21eff8407147563b103733b472546cd..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/get_modified_files.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
-# python ./utils/get_modified_files.py utils src tests examples
-#
-# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
-# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
-
-import re
-import subprocess
-import sys
-
-
-fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
-modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
-
-joined_dirs = "|".join(sys.argv[1:])
-regex = re.compile(rf"^({joined_dirs}).*?\.py$")
-
-relevant_modified_files = [x for x in modified_files if regex.match(x)]
-print(" ".join(relevant_modified_files), end="")
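As the header comments explain, the script joins the requested top-level directories into one regex alternation and keeps only the modified .py files underneath them. A small illustration of that filtering step with made-up file names (the real script feeds it the output of git diff):

import re

top_level_dirs = ["utils", "src", "tests", "examples"]  # hypothetical arguments
modified_files = ["src/diffusers/models/unet.py", "docs/index.md", "tests/test_unet.py"]

joined_dirs = "|".join(top_level_dirs)
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant = [f for f in modified_files if regex.match(f)]
print(" ".join(relevant), end="")  # src/diffusers/models/unet.py tests/test_unet.py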
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
deleted file mode 100644
index f5fee7e13cdfd531bf24d7c261e843855124f762..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
+++ /dev/null
@@ -1,5 +0,0 @@
-_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py'
-model = dict(
- backbone=dict(
- dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
- stage_with_dcn=(False, True, True, True)))
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py
deleted file mode 100644
index ad7b0346a63dfa3c3ca246b624155fc4fd331a3f..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py
+++ /dev/null
@@ -1,12 +0,0 @@
-_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
-model = dict(
- roi_head=dict(
- bbox_roi_extractor=dict(
- type='SingleRoIExtractor',
- roi_layer=dict(
- _delete_=True,
- type='ModulatedDeformRoIPoolPack',
- output_size=7,
- output_channels=256),
- out_channels=256,
- featmap_strides=[4, 8, 16, 32])))
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py
deleted file mode 100644
index 585cc2c332fd88a9f0164b14084d45d7a3783b11..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/hrnet/faster_rcnn_hrnetv2p_w40_2x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './faster_rcnn_hrnetv2p_w40_1x_coco.py'
-# learning policy
-lr_config = dict(step=[16, 22])
-runner = dict(type='EpochBasedRunner', max_epochs=24)
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolo_head.py b/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolo_head.py
deleted file mode 100644
index 25a005d36903333f37a6c6d31b4d613c071f4a07..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/yolo_head.py
+++ /dev/null
@@ -1,577 +0,0 @@
-# Copyright (c) 2019 Western Digital Corporation or its affiliates.
-
-import warnings
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmcv.cnn import ConvModule, normal_init
-from mmcv.runner import force_fp32
-
-from mmdet.core import (build_anchor_generator, build_assigner,
- build_bbox_coder, build_sampler, images_to_levels,
- multi_apply, multiclass_nms)
-from ..builder import HEADS, build_loss
-from .base_dense_head import BaseDenseHead
-from .dense_test_mixins import BBoxTestMixin
-
-
-@HEADS.register_module()
-class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
- """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.
-
- Args:
- num_classes (int): The number of object classes (w/o background)
- in_channels (List[int]): Number of input channels per scale.
- out_channels (List[int]): The number of output channels per scale
- before the final 1x1 layer. Default: (1024, 512, 256).
- anchor_generator (dict): Config dict for anchor generator
- bbox_coder (dict): Config of bounding box coder.
- featmap_strides (List[int]): The stride of each scale.
- Should be in descending order. Default: (32, 16, 8).
- one_hot_smoother (float): Set a non-zero value to enable label-smooth
- Default: 0.
- conv_cfg (dict): Config dict for convolution layer. Default: None.
- norm_cfg (dict): Dictionary to construct and config norm layer.
- Default: dict(type='BN', requires_grad=True)
- act_cfg (dict): Config dict for activation layer.
- Default: dict(type='LeakyReLU', negative_slope=0.1).
- loss_cls (dict): Config of classification loss.
- loss_conf (dict): Config of confidence loss.
- loss_xy (dict): Config of xy coordinate loss.
- loss_wh (dict): Config of wh coordinate loss.
- train_cfg (dict): Training config of YOLOV3 head. Default: None.
- test_cfg (dict): Testing config of YOLOV3 head. Default: None.
- """
-
- def __init__(self,
- num_classes,
- in_channels,
- out_channels=(1024, 512, 256),
- anchor_generator=dict(
- type='YOLOAnchorGenerator',
- base_sizes=[[(116, 90), (156, 198), (373, 326)],
- [(30, 61), (62, 45), (59, 119)],
- [(10, 13), (16, 30), (33, 23)]],
- strides=[32, 16, 8]),
- bbox_coder=dict(type='YOLOBBoxCoder'),
- featmap_strides=[32, 16, 8],
- one_hot_smoother=0.,
- conv_cfg=None,
- norm_cfg=dict(type='BN', requires_grad=True),
- act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
- loss_cls=dict(
- type='CrossEntropyLoss',
- use_sigmoid=True,
- loss_weight=1.0),
- loss_conf=dict(
- type='CrossEntropyLoss',
- use_sigmoid=True,
- loss_weight=1.0),
- loss_xy=dict(
- type='CrossEntropyLoss',
- use_sigmoid=True,
- loss_weight=1.0),
- loss_wh=dict(type='MSELoss', loss_weight=1.0),
- train_cfg=None,
- test_cfg=None):
- super(YOLOV3Head, self).__init__()
- # Check params
- assert (len(in_channels) == len(out_channels) == len(featmap_strides))
-
- self.num_classes = num_classes
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.featmap_strides = featmap_strides
- self.train_cfg = train_cfg
- self.test_cfg = test_cfg
- if self.train_cfg:
- self.assigner = build_assigner(self.train_cfg.assigner)
- if hasattr(self.train_cfg, 'sampler'):
- sampler_cfg = self.train_cfg.sampler
- else:
- sampler_cfg = dict(type='PseudoSampler')
- self.sampler = build_sampler(sampler_cfg, context=self)
-
- self.one_hot_smoother = one_hot_smoother
-
- self.conv_cfg = conv_cfg
- self.norm_cfg = norm_cfg
- self.act_cfg = act_cfg
-
- self.bbox_coder = build_bbox_coder(bbox_coder)
- self.anchor_generator = build_anchor_generator(anchor_generator)
-
- self.loss_cls = build_loss(loss_cls)
- self.loss_conf = build_loss(loss_conf)
- self.loss_xy = build_loss(loss_xy)
- self.loss_wh = build_loss(loss_wh)
- # usually the numbers of anchors for each level are the same
- # except SSD detectors
- self.num_anchors = self.anchor_generator.num_base_anchors[0]
- assert len(
- self.anchor_generator.num_base_anchors) == len(featmap_strides)
- self._init_layers()
-
- @property
- def num_levels(self):
- return len(self.featmap_strides)
-
- @property
- def num_attrib(self):
- """int: number of attributes in pred_map, bboxes (4) +
- objectness (1) + num_classes"""
-
- return 5 + self.num_classes
-
- def _init_layers(self):
- self.convs_bridge = nn.ModuleList()
- self.convs_pred = nn.ModuleList()
- for i in range(self.num_levels):
- conv_bridge = ConvModule(
- self.in_channels[i],
- self.out_channels[i],
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- conv_pred = nn.Conv2d(self.out_channels[i],
- self.num_anchors * self.num_attrib, 1)
-
- self.convs_bridge.append(conv_bridge)
- self.convs_pred.append(conv_pred)
-
- def init_weights(self):
- """Initialize weights of the head."""
- for m in self.convs_pred:
- normal_init(m, std=0.01)
-
- def forward(self, feats):
- """Forward features from the upstream network.
-
- Args:
- feats (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
-            tuple[Tensor]: A tuple of multi-level prediction maps, each of
-                which is a 4D-tensor of shape
-                (batch_size, num_anchors * (5+num_classes), height, width).
- """
-
- assert len(feats) == self.num_levels
- pred_maps = []
- for i in range(self.num_levels):
- x = feats[i]
- x = self.convs_bridge[i](x)
- pred_map = self.convs_pred[i](x)
- pred_maps.append(pred_map)
-
- return tuple(pred_maps),
-
- @force_fp32(apply_to=('pred_maps', ))
- def get_bboxes(self,
- pred_maps,
- img_metas,
- cfg=None,
- rescale=False,
- with_nms=True):
- """Transform network output for a batch into bbox predictions.
-
- Args:
- pred_maps (list[Tensor]): Raw predictions for a batch of images.
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- cfg (mmcv.Config | None): Test / postprocessing configuration,
- if None, test_cfg would be used. Default: None.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- with_nms (bool): If True, do nms before return boxes.
- Default: True.
-
- Returns:
-            list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.
-                The first item is an (n, 5) tensor, where the 5 columns are
-                (tl_x, tl_y, br_x, br_y, score) and the score is between 0 and 1.
- The shape of the second tensor in the tuple is (n,), and
- each element represents the class label of the corresponding
- box.
- """
- num_levels = len(pred_maps)
- pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)]
- scale_factors = [
- img_metas[i]['scale_factor']
- for i in range(pred_maps_list[0].shape[0])
- ]
- result_list = self._get_bboxes(pred_maps_list, scale_factors, cfg,
- rescale, with_nms)
- return result_list
-
- def _get_bboxes(self,
- pred_maps_list,
- scale_factors,
- cfg,
- rescale=False,
- with_nms=True):
- """Transform outputs for a single batch item into bbox predictions.
-
- Args:
- pred_maps_list (list[Tensor]): Prediction maps for different scales
- of each single image in the batch.
-            scale_factors (list(ndarray)): Scale factor of the image arranged as
- (w_scale, h_scale, w_scale, h_scale).
- cfg (mmcv.Config | None): Test / postprocessing configuration,
- if None, test_cfg would be used.
- rescale (bool): If True, return boxes in original image space.
- Default: False.
- with_nms (bool): If True, do nms before return boxes.
- Default: True.
-
- Returns:
-            list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.
-                The first item is an (n, 5) tensor, where the 5 columns are
-                (tl_x, tl_y, br_x, br_y, score) and the score is between 0 and 1.
- The shape of the second tensor in the tuple is (n,), and
- each element represents the class label of the corresponding
- box.
- """
- cfg = self.test_cfg if cfg is None else cfg
- assert len(pred_maps_list) == self.num_levels
-
- device = pred_maps_list[0].device
- batch_size = pred_maps_list[0].shape[0]
-
- featmap_sizes = [
- pred_maps_list[i].shape[-2:] for i in range(self.num_levels)
- ]
- multi_lvl_anchors = self.anchor_generator.grid_anchors(
- featmap_sizes, device)
- # convert to tensor to keep tracing
- nms_pre_tensor = torch.tensor(
- cfg.get('nms_pre', -1), device=device, dtype=torch.long)
-
- multi_lvl_bboxes = []
- multi_lvl_cls_scores = []
- multi_lvl_conf_scores = []
- for i in range(self.num_levels):
- # get some key info for current scale
- pred_map = pred_maps_list[i]
- stride = self.featmap_strides[i]
- # (b,h, w, num_anchors*num_attrib) ->
- # (b,h*w*num_anchors, num_attrib)
- pred_map = pred_map.permute(0, 2, 3,
- 1).reshape(batch_size, -1,
- self.num_attrib)
- # Inplace operation like
- # ```pred_map[..., :2] = \torch.sigmoid(pred_map[..., :2])```
- # would create constant tensor when exporting to onnx
- pred_map_conf = torch.sigmoid(pred_map[..., :2])
- pred_map_rest = pred_map[..., 2:]
- pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1)
- pred_map_boxes = pred_map[..., :4]
- multi_lvl_anchor = multi_lvl_anchors[i]
- multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes)
- bbox_pred = self.bbox_coder.decode(multi_lvl_anchor,
- pred_map_boxes, stride)
- # conf and cls
- conf_pred = torch.sigmoid(pred_map[..., 4])
- cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
- batch_size, -1, self.num_classes) # Cls pred one-hot.
-
- # Get top-k prediction
- # Always keep topk op for dynamic input in onnx
- if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
- or conf_pred.shape[1] > nms_pre_tensor):
- from torch import _shape_as_tensor
- # keep shape as tensor and get k
- num_anchor = _shape_as_tensor(conf_pred)[1].to(device)
- nms_pre = torch.where(nms_pre_tensor < num_anchor,
- nms_pre_tensor, num_anchor)
- _, topk_inds = conf_pred.topk(nms_pre)
- batch_inds = torch.arange(batch_size).view(
- -1, 1).expand_as(topk_inds).long()
- bbox_pred = bbox_pred[batch_inds, topk_inds, :]
- cls_pred = cls_pred[batch_inds, topk_inds, :]
- conf_pred = conf_pred[batch_inds, topk_inds]
-
- # Save the result of current scale
- multi_lvl_bboxes.append(bbox_pred)
- multi_lvl_cls_scores.append(cls_pred)
- multi_lvl_conf_scores.append(conf_pred)
-
- # Merge the results of different scales together
- batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1)
- batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1)
- batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1)
-
- # Set max number of box to be feed into nms in deployment
- deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
- if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
- _, topk_inds = batch_mlvl_conf_scores.topk(deploy_nms_pre)
- batch_inds = torch.arange(batch_size).view(
- -1, 1).expand_as(topk_inds).long()
- batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
- batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
- batch_mlvl_conf_scores = batch_mlvl_conf_scores[batch_inds,
- topk_inds]
-
- if with_nms and (batch_mlvl_conf_scores.size(0) == 0):
- return torch.zeros((0, 5)), torch.zeros((0, ))
-
- if rescale:
- batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
- scale_factors).unsqueeze(1)
-
- # In mmdet 2.x, the class_id for background is num_classes.
- # i.e., the last column.
- padding = batch_mlvl_scores.new_zeros(batch_size,
- batch_mlvl_scores.shape[1], 1)
- batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
-
- # Support exporting to onnx without nms
- if with_nms and cfg.get('nms', None) is not None:
- det_results = []
- for (mlvl_bboxes, mlvl_scores,
- mlvl_conf_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
- batch_mlvl_conf_scores):
- # Filtering out all predictions with conf < conf_thr
- conf_thr = cfg.get('conf_thr', -1)
- if conf_thr > 0 and (not torch.onnx.is_in_onnx_export()):
-                    # TensorRT does not support NonZero
- # add as_tuple=False for compatibility in Pytorch 1.6
- # flatten would create a Reshape op with constant values,
- # and raise RuntimeError when doing inference in ONNX
- # Runtime with a different input image (#4221).
- conf_inds = mlvl_conf_scores.ge(conf_thr).nonzero(
- as_tuple=False).squeeze(1)
- mlvl_bboxes = mlvl_bboxes[conf_inds, :]
- mlvl_scores = mlvl_scores[conf_inds, :]
- mlvl_conf_scores = mlvl_conf_scores[conf_inds]
-
- det_bboxes, det_labels = multiclass_nms(
- mlvl_bboxes,
- mlvl_scores,
- cfg.score_thr,
- cfg.nms,
- cfg.max_per_img,
- score_factors=mlvl_conf_scores)
- det_results.append(tuple([det_bboxes, det_labels]))
-
- else:
- det_results = [
- tuple(mlvl_bs)
- for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
- batch_mlvl_conf_scores)
- ]
- return det_results
-
- @force_fp32(apply_to=('pred_maps', ))
- def loss(self,
- pred_maps,
- gt_bboxes,
- gt_labels,
- img_metas,
- gt_bboxes_ignore=None):
- """Compute loss of the head.
-
- Args:
- pred_maps (list[Tensor]): Prediction map for each scale level,
- shape (N, num_anchors * num_attrib, H, W)
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
- gt_labels (list[Tensor]): class indices corresponding to each box
- img_metas (list[dict]): Meta information of each image, e.g.,
- image size, scaling factor, etc.
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
-
- Returns:
- dict[str, Tensor]: A dictionary of loss components.
- """
- num_imgs = len(img_metas)
- device = pred_maps[0][0].device
-
- featmap_sizes = [
- pred_maps[i].shape[-2:] for i in range(self.num_levels)
- ]
- multi_level_anchors = self.anchor_generator.grid_anchors(
- featmap_sizes, device)
- anchor_list = [multi_level_anchors for _ in range(num_imgs)]
-
- responsible_flag_list = []
- for img_id in range(len(img_metas)):
- responsible_flag_list.append(
- self.anchor_generator.responsible_flags(
- featmap_sizes, gt_bboxes[img_id], device))
-
- target_maps_list, neg_maps_list = self.get_targets(
- anchor_list, responsible_flag_list, gt_bboxes, gt_labels)
-
- losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(
- self.loss_single, pred_maps, target_maps_list, neg_maps_list)
-
- return dict(
- loss_cls=losses_cls,
- loss_conf=losses_conf,
- loss_xy=losses_xy,
- loss_wh=losses_wh)
-
- def loss_single(self, pred_map, target_map, neg_map):
- """Compute loss of a single image from a batch.
-
- Args:
- pred_map (Tensor): Raw predictions for a single level.
- target_map (Tensor): The Ground-Truth target for a single level.
- neg_map (Tensor): The negative masks for a single level.
-
- Returns:
- tuple:
- loss_cls (Tensor): Classification loss.
- loss_conf (Tensor): Confidence loss.
- loss_xy (Tensor): Regression loss of x, y coordinate.
- loss_wh (Tensor): Regression loss of w, h coordinate.
- """
-
- num_imgs = len(pred_map)
- pred_map = pred_map.permute(0, 2, 3,
- 1).reshape(num_imgs, -1, self.num_attrib)
- neg_mask = neg_map.float()
- pos_mask = target_map[..., 4]
- pos_and_neg_mask = neg_mask + pos_mask
- pos_mask = pos_mask.unsqueeze(dim=-1)
- if torch.max(pos_and_neg_mask) > 1.:
- warnings.warn('There is overlap between pos and neg sample.')
- pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)
-
- pred_xy = pred_map[..., :2]
- pred_wh = pred_map[..., 2:4]
- pred_conf = pred_map[..., 4]
- pred_label = pred_map[..., 5:]
-
- target_xy = target_map[..., :2]
- target_wh = target_map[..., 2:4]
- target_conf = target_map[..., 4]
- target_label = target_map[..., 5:]
-
- loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask)
- loss_conf = self.loss_conf(
- pred_conf, target_conf, weight=pos_and_neg_mask)
- loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)
- loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)
-
- return loss_cls, loss_conf, loss_xy, loss_wh
-
- def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
- gt_labels_list):
- """Compute target maps for anchors in multiple images.
-
- Args:
- anchor_list (list[list[Tensor]]): Multi level anchors of each
- image. The outer list indicates images, and the inner list
- corresponds to feature levels of the image. Each element of
- the inner list is a tensor of shape (num_total_anchors, 4).
- responsible_flag_list (list[list[Tensor]]): Multi level responsible
- flags of each image. Each element is a tensor of shape
- (num_total_anchors, )
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
- gt_labels_list (list[Tensor]): Ground truth labels of each box.
-
- Returns:
- tuple: Usually returns a tuple containing learning targets.
- - target_map_list (list[Tensor]): Target map of each level.
- - neg_map_list (list[Tensor]): Negative map of each level.
- """
- num_imgs = len(anchor_list)
-
- # anchor number of multi levels
- num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
-
- results = multi_apply(self._get_targets_single, anchor_list,
- responsible_flag_list, gt_bboxes_list,
- gt_labels_list)
-
- all_target_maps, all_neg_maps = results
- assert num_imgs == len(all_target_maps) == len(all_neg_maps)
- target_maps_list = images_to_levels(all_target_maps, num_level_anchors)
- neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors)
-
- return target_maps_list, neg_maps_list
-
- def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
- gt_labels):
- """Generate matching bounding box prior and converted GT.
-
- Args:
- anchors (list[Tensor]): Multi-level anchors of the image.
- responsible_flags (list[Tensor]): Multi-level responsible flags of
- anchors
- gt_bboxes (Tensor): Ground truth bboxes of single image.
- gt_labels (Tensor): Ground truth labels of single image.
-
- Returns:
- tuple:
-                target_map (Tensor): Prediction target map of each
- scale level, shape (num_total_anchors,
- 5+num_classes)
- neg_map (Tensor): Negative map of each scale level,
- shape (num_total_anchors,)
- """
-
- anchor_strides = []
- for i in range(len(anchors)):
- anchor_strides.append(
- torch.tensor(self.featmap_strides[i],
- device=gt_bboxes.device).repeat(len(anchors[i])))
- concat_anchors = torch.cat(anchors)
- concat_responsible_flags = torch.cat(responsible_flags)
-
- anchor_strides = torch.cat(anchor_strides)
- assert len(anchor_strides) == len(concat_anchors) == \
- len(concat_responsible_flags)
- assign_result = self.assigner.assign(concat_anchors,
- concat_responsible_flags,
- gt_bboxes)
- sampling_result = self.sampler.sample(assign_result, concat_anchors,
- gt_bboxes)
-
- target_map = concat_anchors.new_zeros(
- concat_anchors.size(0), self.num_attrib)
-
- target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode(
- sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes,
- anchor_strides[sampling_result.pos_inds])
-
- target_map[sampling_result.pos_inds, 4] = 1
-
- gt_labels_one_hot = F.one_hot(
- gt_labels, num_classes=self.num_classes).float()
- if self.one_hot_smoother != 0: # label smooth
- gt_labels_one_hot = gt_labels_one_hot * (
- 1 - self.one_hot_smoother
- ) + self.one_hot_smoother / self.num_classes
- target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[
- sampling_result.pos_assigned_gt_inds]
-
- neg_map = concat_anchors.new_zeros(
- concat_anchors.size(0), dtype=torch.uint8)
- neg_map[sampling_result.neg_inds] = 1
-
- return target_map, neg_map
-
- def aug_test(self, feats, img_metas, rescale=False):
- """Test function with test time augmentation.
-
- Args:
- feats (list[Tensor]): the outer list indicates test-time
- augmentations and inner Tensor should have a shape NxCxHxW,
- which contains features for all images in the batch.
- img_metas (list[list[dict]]): the outer list indicates test-time
- augs (multiscale, flip, etc.) and the inner list indicates
- images in a batch. each dict has image information.
- rescale (bool, optional): Whether to rescale the results.
- Defaults to False.
-
- Returns:
- list[ndarray]: bbox results of each class
- """
- return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
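One detail that is easy to miss in _get_targets_single is the optional label smoothing: the hard one-hot class targets are pulled toward a uniform distribution by one_hot_smoother before being written into the target map. A self-contained sketch of just that transform, with hypothetical class count and labels:

import torch
import torch.nn.functional as F

num_classes, one_hot_smoother = 5, 0.1  # hypothetical values
gt_labels = torch.tensor([0, 3])

one_hot = F.one_hot(gt_labels, num_classes=num_classes).float()
smoothed = one_hot * (1 - one_hot_smoother) + one_hot_smoother / num_classes
print(smoothed)
# tensor([[0.9200, 0.0200, 0.0200, 0.0200, 0.0200],
#         [0.0200, 0.0200, 0.0200, 0.9200, 0.0200]])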
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py
deleted file mode 100644
index a8fbd9beb11f3d1308ce2cd12da2a177c2d39478..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py
+++ /dev/null
@@ -1,6 +0,0 @@
-_base_ = [
- '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py',
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
-]
-model = dict(
- decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py
deleted file mode 100644
index 0aada9d8dcd792fd4fc7da8908cc11d44a9ff521..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py
+++ /dev/null
@@ -1,39 +0,0 @@
-_base_ = './ocrnet_hr18_512x1024_40k_cityscapes.py'
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- pretrained='open-mmlab://msra/hrnetv2_w48',
- backbone=dict(
- extra=dict(
- stage2=dict(num_channels=(48, 96)),
- stage3=dict(num_channels=(48, 96, 192)),
- stage4=dict(num_channels=(48, 96, 192, 384)))),
- decode_head=[
- dict(
- type='FCNHead',
- in_channels=[48, 96, 192, 384],
- channels=sum([48, 96, 192, 384]),
- input_transform='resize_concat',
- in_index=(0, 1, 2, 3),
- kernel_size=1,
- num_convs=1,
- norm_cfg=norm_cfg,
- concat_input=False,
- dropout_ratio=-1,
- num_classes=19,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- dict(
- type='OCRHead',
- in_channels=[48, 96, 192, 384],
- channels=512,
- ocr_channels=256,
- input_transform='resize_concat',
- in_index=(0, 1, 2, 3),
- norm_cfg=norm_cfg,
- dropout_ratio=-1,
- num_classes=19,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
- ])
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py
deleted file mode 100644
index f8865a7c4d795d9de3f5bc6b762b305b3cabc22f..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r101-d8_512x512_80k_ade20k.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './psanet_r50-d8_512x512_80k_ade20k.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/inference.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/inference.py
deleted file mode 100644
index 90bc1c0c68525734bd6793f07c15fe97d3c8342c..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/apis/inference.py
+++ /dev/null
@@ -1,136 +0,0 @@
-import matplotlib.pyplot as plt
-import annotator.uniformer.mmcv as mmcv
-import torch
-from annotator.uniformer.mmcv.parallel import collate, scatter
-from annotator.uniformer.mmcv.runner import load_checkpoint
-
-from annotator.uniformer.mmseg.datasets.pipelines import Compose
-from annotator.uniformer.mmseg.models import build_segmentor
-
-
-def init_segmentor(config, checkpoint=None, device='cuda:0'):
- """Initialize a segmentor from config file.
-
- Args:
- config (str or :obj:`mmcv.Config`): Config file path or the config
- object.
- checkpoint (str, optional): Checkpoint path. If left as None, the model
- will not load any weights.
-        device (str, optional): CPU/CUDA device option. Default 'cuda:0'.
- Use 'cpu' for loading model on CPU.
- Returns:
- nn.Module: The constructed segmentor.
- """
- if isinstance(config, str):
- config = mmcv.Config.fromfile(config)
- elif not isinstance(config, mmcv.Config):
- raise TypeError('config must be a filename or Config object, '
- 'but got {}'.format(type(config)))
- config.model.pretrained = None
- config.model.train_cfg = None
- model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
- if checkpoint is not None:
- checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
- model.CLASSES = checkpoint['meta']['CLASSES']
- model.PALETTE = checkpoint['meta']['PALETTE']
- model.cfg = config # save the config in the model for convenience
- model.to(device)
- model.eval()
- return model
-
-
-class LoadImage:
- """A simple pipeline to load image."""
-
- def __call__(self, results):
- """Call function to load images into results.
-
- Args:
- results (dict): A result dict contains the file name
- of the image to be read.
-
- Returns:
- dict: ``results`` will be returned containing loaded image.
- """
-
- if isinstance(results['img'], str):
- results['filename'] = results['img']
- results['ori_filename'] = results['img']
- else:
- results['filename'] = None
- results['ori_filename'] = None
- img = mmcv.imread(results['img'])
- results['img'] = img
- results['img_shape'] = img.shape
- results['ori_shape'] = img.shape
- return results
-
-
-def inference_segmentor(model, img):
- """Inference image(s) with the segmentor.
-
- Args:
- model (nn.Module): The loaded segmentor.
-        img (str/ndarray): Either an image file or a loaded
-            image.
-
- Returns:
- (list[Tensor]): The segmentation result.
- """
- cfg = model.cfg
- device = next(model.parameters()).device # model device
- # build the data pipeline
- test_pipeline = [LoadImage()] + cfg.data.test.pipeline[1:]
- test_pipeline = Compose(test_pipeline)
- # prepare data
- data = dict(img=img)
- data = test_pipeline(data)
- data = collate([data], samples_per_gpu=1)
- if next(model.parameters()).is_cuda:
- # scatter to specified GPU
- data = scatter(data, [device])[0]
- else:
- data['img_metas'] = [i.data[0] for i in data['img_metas']]
-
- # forward the model
- with torch.no_grad():
- result = model(return_loss=False, rescale=True, **data)
- return result
-
-
-def show_result_pyplot(model,
- img,
- result,
- palette=None,
- fig_size=(15, 10),
- opacity=0.5,
- title='',
- block=True):
- """Visualize the segmentation results on the image.
-
- Args:
- model (nn.Module): The loaded segmentor.
- img (str or np.ndarray): Image filename or loaded image.
- result (list): The segmentation result.
-        palette (list[list[int]] | None): The palette of segmentation
- map. If None is given, random palette will be generated.
- Default: None
- fig_size (tuple): Figure size of the pyplot figure.
-        opacity (float): Opacity of painted segmentation map.
- Default 0.5.
- Must be in (0, 1] range.
- title (str): The title of pyplot figure.
- Default is ''.
- block (bool): Whether to block the pyplot figure.
- Default is True.
- """
- if hasattr(model, 'module'):
- model = model.module
- img = model.show_result(
- img, result, palette=palette, show=False, opacity=opacity)
- # plt.figure(figsize=fig_size)
- # plt.imshow(mmcv.bgr2rgb(img))
- # plt.title(title)
- # plt.tight_layout()
- # plt.show(block=block)
- return mmcv.bgr2rgb(img)
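The three functions above form a small inference API: build the segmentor once, run it per image, then render the overlay. A rough usage sketch, assuming the annotator package re-exports these helpers the way upstream mmseg does, and with placeholder config, checkpoint, and image paths:

from annotator.uniformer.mmseg.apis import (inference_segmentor, init_segmentor,
                                            show_result_pyplot)

config_file = "configs/my_segmentor.py"           # placeholder path
checkpoint_file = "checkpoints/my_segmentor.pth"  # placeholder path

model = init_segmentor(config_file, checkpoint_file, device="cuda:0")
result = inference_segmentor(model, "demo.jpg")
overlay = show_result_pyplot(model, "demo.jpg", result, opacity=0.5)  # RGB ndarray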
diff --git a/spaces/ArcAhmedEssam/CLIP-Interrogator-2/share_btn.py b/spaces/ArcAhmedEssam/CLIP-Interrogator-2/share_btn.py
deleted file mode 100644
index 0f03b66de38b85ebd04515133ed354c362886b39..0000000000000000000000000000000000000000
--- a/spaces/ArcAhmedEssam/CLIP-Interrogator-2/share_btn.py
+++ /dev/null
@@ -1,70 +0,0 @@
-community_icon_html = """"""
-
-loading_icon_html = """"""
-
-share_js = """async () => {
- async function uploadFile(file){
- const UPLOAD_URL = 'https://huggingface.co/uploads';
- const response = await fetch(UPLOAD_URL, {
- method: 'POST',
- headers: {
- 'Content-Type': file.type,
- 'X-Requested-With': 'XMLHttpRequest',
- },
- body: file, /// <- File inherits from Blob
- });
- const url = await response.text();
- return url;
- }
- async function getInputImgFile(imgEl){
- const res = await fetch(imgEl.src);
- const blob = await res.blob();
- const imgId = Date.now() % 200;
- const isPng = imgEl.src.startsWith(`data:image/png`);
- if(isPng){
-            const fileName = `sd-perception-${imgId}.png`;
- return new File([blob], fileName, { type: 'image/png' });
- }else{
-            const fileName = `sd-perception-${imgId}.jpg`;
- return new File([blob], fileName, { type: 'image/jpeg' });
- }
- }
- const gradioEl = document.querySelector('body > gradio-app');
- // const gradioEl = document.querySelector("gradio-app").shadowRoot;
- const inputImgEl = gradioEl.querySelector('#input-img img');
- const outputTxt = gradioEl.querySelector('#output-txt textarea').value;
- let titleTxt = outputTxt;
- if(titleTxt.length > 100){
- titleTxt = titleTxt.slice(0, 100) + ' ...';
- }
- const shareBtnEl = gradioEl.querySelector('#share-btn');
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
- if(!outputTxt){
- return;
- };
- shareBtnEl.style.pointerEvents = 'none';
- shareIconEl.style.display = 'none';
- loadingIconEl.style.removeProperty('display');
- const inputFile = await getInputImgFile(inputImgEl);
- const urlInputImg = await uploadFile(inputFile);
- const descriptionMd = `#### Input img:
-<img src='${urlInputImg}'>
-
-#### Caption:
-${outputTxt}`;
- const params = new URLSearchParams({
- title: titleTxt,
- description: descriptionMd,
- });
- const paramsStr = params.toString();
- window.open(`https://huggingface.co/spaces/fffiloni/CLIP-Interrogator-2/discussions/new?${paramsStr}`, '_blank');
- shareBtnEl.style.removeProperty('pointer-events');
- shareIconEl.style.removeProperty('display');
- loadingIconEl.style.display = 'none';
-}"""
\ No newline at end of file
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/wheel.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/wheel.py
deleted file mode 100644
index a8cd1330f0f73ac76832bdbd6b455b10bd91ba83..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/operations/install/wheel.py
+++ /dev/null
@@ -1,740 +0,0 @@
-"""Support for installing and building the "wheel" binary package format.
-"""
-
-import collections
-import compileall
-import contextlib
-import csv
-import importlib
-import logging
-import os.path
-import re
-import shutil
-import sys
-import warnings
-from base64 import urlsafe_b64encode
-from email.message import Message
-from itertools import chain, filterfalse, starmap
-from typing import (
- IO,
- TYPE_CHECKING,
- Any,
- BinaryIO,
- Callable,
- Dict,
- Generator,
- Iterable,
- Iterator,
- List,
- NewType,
- Optional,
- Sequence,
- Set,
- Tuple,
- Union,
- cast,
-)
-from zipfile import ZipFile, ZipInfo
-
-from pip._vendor.distlib.scripts import ScriptMaker
-from pip._vendor.distlib.util import get_export_entry
-from pip._vendor.packaging.utils import canonicalize_name
-
-from pip._internal.exceptions import InstallationError
-from pip._internal.locations import get_major_minor_version
-from pip._internal.metadata import (
- BaseDistribution,
- FilesystemWheel,
- get_wheel_distribution,
-)
-from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl
-from pip._internal.models.scheme import SCHEME_KEYS, Scheme
-from pip._internal.utils.filesystem import adjacent_tmp_file, replace
-from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file, partition
-from pip._internal.utils.unpacking import (
- current_umask,
- is_within_directory,
- set_extracted_file_to_default_mode_plus_executable,
- zip_item_is_executable,
-)
-from pip._internal.utils.wheel import parse_wheel
-
-if TYPE_CHECKING:
- from typing import Protocol
-
- class File(Protocol):
- src_record_path: "RecordPath"
- dest_path: str
- changed: bool
-
- def save(self) -> None:
- pass
-
-
-logger = logging.getLogger(__name__)
-
-RecordPath = NewType("RecordPath", str)
-InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]]
-
-
-def rehash(path: str, blocksize: int = 1 << 20) -> Tuple[str, str]:
- """Return (encoded_digest, length) for path using hashlib.sha256()"""
- h, length = hash_file(path, blocksize)
- digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=")
- return (digest, str(length))
-
-
-def csv_io_kwargs(mode: str) -> Dict[str, Any]:
- """Return keyword arguments to properly open a CSV file
- in the given mode.
- """
- return {"mode": mode, "newline": "", "encoding": "utf-8"}
-
-
-def fix_script(path: str) -> bool:
- """Replace #!python with #!/path/to/python
- Return True if file was changed.
- """
- # XXX RECORD hashes will need to be updated
- assert os.path.isfile(path)
-
- with open(path, "rb") as script:
- firstline = script.readline()
- if not firstline.startswith(b"#!python"):
- return False
- exename = sys.executable.encode(sys.getfilesystemencoding())
- firstline = b"#!" + exename + os.linesep.encode("ascii")
- rest = script.read()
- with open(path, "wb") as script:
- script.write(firstline)
- script.write(rest)
- return True
-
-
-def wheel_root_is_purelib(metadata: Message) -> bool:
- return metadata.get("Root-Is-Purelib", "").lower() == "true"
-
-
-def get_entrypoints(dist: BaseDistribution) -> Tuple[Dict[str, str], Dict[str, str]]:
- console_scripts = {}
- gui_scripts = {}
- for entry_point in dist.iter_entry_points():
- if entry_point.group == "console_scripts":
- console_scripts[entry_point.name] = entry_point.value
- elif entry_point.group == "gui_scripts":
- gui_scripts[entry_point.name] = entry_point.value
- return console_scripts, gui_scripts
-
-
-def message_about_scripts_not_on_PATH(scripts: Sequence[str]) -> Optional[str]:
- """Determine if any scripts are not on PATH and format a warning.
- Returns a warning message if one or more scripts are not on PATH,
- otherwise None.
- """
- if not scripts:
- return None
-
- # Group scripts by the path they were installed in
- grouped_by_dir: Dict[str, Set[str]] = collections.defaultdict(set)
- for destfile in scripts:
- parent_dir = os.path.dirname(destfile)
- script_name = os.path.basename(destfile)
- grouped_by_dir[parent_dir].add(script_name)
-
- # We don't want to warn for directories that are on PATH.
- not_warn_dirs = [
- os.path.normcase(os.path.normpath(i)).rstrip(os.sep)
- for i in os.environ.get("PATH", "").split(os.pathsep)
- ]
- # If an executable sits with sys.executable, we don't warn for it.
- # This covers the case of venv invocations without activating the venv.
- not_warn_dirs.append(
- os.path.normcase(os.path.normpath(os.path.dirname(sys.executable)))
- )
- warn_for: Dict[str, Set[str]] = {
- parent_dir: scripts
- for parent_dir, scripts in grouped_by_dir.items()
- if os.path.normcase(os.path.normpath(parent_dir)) not in not_warn_dirs
- }
- if not warn_for:
- return None
-
- # Format a message
- msg_lines = []
- for parent_dir, dir_scripts in warn_for.items():
- sorted_scripts: List[str] = sorted(dir_scripts)
- if len(sorted_scripts) == 1:
- start_text = "script {} is".format(sorted_scripts[0])
- else:
- start_text = "scripts {} are".format(
- ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
- )
-
- msg_lines.append(
- "The {} installed in '{}' which is not on PATH.".format(
- start_text, parent_dir
- )
- )
-
- last_line_fmt = (
- "Consider adding {} to PATH or, if you prefer "
- "to suppress this warning, use --no-warn-script-location."
- )
- if len(msg_lines) == 1:
- msg_lines.append(last_line_fmt.format("this directory"))
- else:
- msg_lines.append(last_line_fmt.format("these directories"))
-
- # Add a note if any directory starts with ~
- warn_for_tilde = any(
- i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i
- )
- if warn_for_tilde:
- tilde_warning_msg = (
- "NOTE: The current PATH contains path(s) starting with `~`, "
- "which may not be expanded by all applications."
- )
- msg_lines.append(tilde_warning_msg)
-
- # Returns the formatted multiline message
- return "\n".join(msg_lines)
-
-
-def _normalized_outrows(
- outrows: Iterable[InstalledCSVRow],
-) -> List[Tuple[str, str, str]]:
- """Normalize the given rows of a RECORD file.
-
- Items in each row are converted into str. Rows are then sorted to make
- the value more predictable for tests.
-
- Each row is a 3-tuple (path, hash, size) and corresponds to a record of
- a RECORD file (see PEP 376 and PEP 427 for details). For the rows
- passed to this function, the size can be an integer as an int or string,
- or the empty string.
- """
- # Normally, there should only be one row per path, in which case the
- # second and third elements don't come into play when sorting.
- # However, in cases in the wild where a path might happen to occur twice,
- # we don't want the sort operation to trigger an error (but still want
- # determinism). Since the third element can be an int or string, we
- # coerce each element to a string to avoid a TypeError in this case.
- # For additional background, see--
- # https://github.com/pypa/pip/issues/5868
- return sorted(
- (record_path, hash_, str(size)) for record_path, hash_, size in outrows
- )
-
-
-def _record_to_fs_path(record_path: RecordPath, lib_dir: str) -> str:
- return os.path.join(lib_dir, record_path)
-
-
-def _fs_to_record_path(path: str, lib_dir: str) -> RecordPath:
- # On Windows, do not handle relative paths if they belong to different
- # logical disks
- if os.path.splitdrive(path)[0].lower() == os.path.splitdrive(lib_dir)[0].lower():
- path = os.path.relpath(path, lib_dir)
-
- path = path.replace(os.path.sep, "/")
- return cast("RecordPath", path)
-
-
-def get_csv_rows_for_installed(
- old_csv_rows: List[List[str]],
- installed: Dict[RecordPath, RecordPath],
- changed: Set[RecordPath],
- generated: List[str],
- lib_dir: str,
-) -> List[InstalledCSVRow]:
- """
- :param installed: A map from archive RECORD path to installation RECORD
- path.
- """
- installed_rows: List[InstalledCSVRow] = []
- for row in old_csv_rows:
- if len(row) > 3:
- logger.warning("RECORD line has more than three elements: %s", row)
- old_record_path = cast("RecordPath", row[0])
- new_record_path = installed.pop(old_record_path, old_record_path)
- if new_record_path in changed:
- digest, length = rehash(_record_to_fs_path(new_record_path, lib_dir))
- else:
- digest = row[1] if len(row) > 1 else ""
- length = row[2] if len(row) > 2 else ""
- installed_rows.append((new_record_path, digest, length))
- for f in generated:
- path = _fs_to_record_path(f, lib_dir)
- digest, length = rehash(f)
- installed_rows.append((path, digest, length))
- for installed_record_path in installed.values():
- installed_rows.append((installed_record_path, "", ""))
- return installed_rows
-
-
-def get_console_script_specs(console: Dict[str, str]) -> List[str]:
- """
- Given the mapping from entrypoint name to callable, return the relevant
- console script specs.
- """
- # Don't mutate caller's version
- console = console.copy()
-
- scripts_to_generate = []
-
- # Special case pip and setuptools to generate versioned wrappers
- #
- # The issue is that some projects (specifically, pip and setuptools) use
- # code in setup.py to create "versioned" entry points - pip2.7 on Python
- # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
- # the wheel metadata at build time, and so if the wheel is installed with
- # a *different* version of Python the entry points will be wrong. The
- # correct fix for this is to enhance the metadata to be able to describe
- # such versioned entry points, but that won't happen till Metadata 2.0 is
- # available.
- # In the meantime, projects using versioned entry points will either have
- # incorrect versioned entry points, or they will not be able to distribute
- # "universal" wheels (i.e., they will need a wheel per Python version).
- #
- # Because setuptools and pip are bundled with _ensurepip and virtualenv,
- # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
- # override the versioned entry points in the wheel and generate the
- # correct ones. This code is purely a short-term measure until Metadata 2.0
- # is available.
- #
- # To add the level of hack in this section of code, in order to support
- # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
- # variable which will control which version scripts get installed.
- #
- # ENSUREPIP_OPTIONS=altinstall
- # - Only pipX.Y and easy_install-X.Y will be generated and installed
- # ENSUREPIP_OPTIONS=install
- # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
- # that this option is technically if ENSUREPIP_OPTIONS is set and is
- # not altinstall
- # DEFAULT
- # - The default behavior is to install pip, pipX, pipX.Y, easy_install
- # and easy_install-X.Y.
- pip_script = console.pop("pip", None)
- if pip_script:
- if "ENSUREPIP_OPTIONS" not in os.environ:
- scripts_to_generate.append("pip = " + pip_script)
-
- if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
- scripts_to_generate.append(
- "pip{} = {}".format(sys.version_info[0], pip_script)
- )
-
- scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}")
- # Delete any other versioned pip entry points
- pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)]
- for k in pip_ep:
- del console[k]
- easy_install_script = console.pop("easy_install", None)
- if easy_install_script:
- if "ENSUREPIP_OPTIONS" not in os.environ:
- scripts_to_generate.append("easy_install = " + easy_install_script)
-
- scripts_to_generate.append(
- "easy_install-{} = {}".format(
- get_major_minor_version(), easy_install_script
- )
- )
- # Delete any other versioned easy_install entry points
- easy_install_ep = [
- k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k)
- ]
- for k in easy_install_ep:
- del console[k]
-
- # Generate the console entry points specified in the wheel
- scripts_to_generate.extend(starmap("{} = {}".format, console.items()))
-
- return scripts_to_generate
-
-
-class ZipBackedFile:
- def __init__(
- self, src_record_path: RecordPath, dest_path: str, zip_file: ZipFile
- ) -> None:
- self.src_record_path = src_record_path
- self.dest_path = dest_path
- self._zip_file = zip_file
- self.changed = False
-
- def _getinfo(self) -> ZipInfo:
- return self._zip_file.getinfo(self.src_record_path)
-
- def save(self) -> None:
- # directory creation is lazy and after file filtering
- # to ensure we don't install empty dirs; empty dirs can't be
- # uninstalled.
- parent_dir = os.path.dirname(self.dest_path)
- ensure_dir(parent_dir)
-
- # When we open the output file below, any existing file is truncated
- # before we start writing the new contents. This is fine in most
- # cases, but can cause a segfault if pip has loaded a shared
- # object (e.g. from pyopenssl through its vendored urllib3)
- # Since the shared object is mmap'd an attempt to call a
- # symbol in it will then cause a segfault. Unlinking the file
- # allows writing of new contents while allowing the process to
- # continue to use the old copy.
- if os.path.exists(self.dest_path):
- os.unlink(self.dest_path)
-
- zipinfo = self._getinfo()
-
- with self._zip_file.open(zipinfo) as f:
- with open(self.dest_path, "wb") as dest:
- shutil.copyfileobj(f, dest)
-
- if zip_item_is_executable(zipinfo):
- set_extracted_file_to_default_mode_plus_executable(self.dest_path)
-
-
-class ScriptFile:
- def __init__(self, file: "File") -> None:
- self._file = file
- self.src_record_path = self._file.src_record_path
- self.dest_path = self._file.dest_path
- self.changed = False
-
- def save(self) -> None:
- self._file.save()
- self.changed = fix_script(self.dest_path)
-
-
-class MissingCallableSuffix(InstallationError):
- def __init__(self, entry_point: str) -> None:
- super().__init__(
- "Invalid script entry point: {} - A callable "
- "suffix is required. Cf https://packaging.python.org/"
- "specifications/entry-points/#use-for-scripts for more "
- "information.".format(entry_point)
- )
-
-
-def _raise_for_invalid_entrypoint(specification: str) -> None:
- entry = get_export_entry(specification)
- if entry is not None and entry.suffix is None:
- raise MissingCallableSuffix(str(entry))
-
-
-class PipScriptMaker(ScriptMaker):
- def make(
- self, specification: str, options: Optional[Dict[str, Any]] = None
- ) -> List[str]:
- _raise_for_invalid_entrypoint(specification)
- return super().make(specification, options)
-
-
-def _install_wheel(
- name: str,
- wheel_zip: ZipFile,
- wheel_path: str,
- scheme: Scheme,
- pycompile: bool = True,
- warn_script_location: bool = True,
- direct_url: Optional[DirectUrl] = None,
- requested: bool = False,
-) -> None:
- """Install a wheel.
-
- :param name: Name of the project to install
- :param wheel_zip: open ZipFile for wheel being installed
- :param scheme: Distutils scheme dictating the install directories
- :param req_description: String used in place of the requirement, for
- logging
- :param pycompile: Whether to byte-compile installed Python files
- :param warn_script_location: Whether to check that scripts are installed
- into a directory on PATH
- :raises UnsupportedWheel:
- * when the directory holds an unpacked wheel with incompatible
- Wheel-Version
- * when the .dist-info dir does not match the wheel
- """
- info_dir, metadata = parse_wheel(wheel_zip, name)
-
- if wheel_root_is_purelib(metadata):
- lib_dir = scheme.purelib
- else:
- lib_dir = scheme.platlib
-
- # Record details of the files moved
- # installed = files copied from the wheel to the destination
- # changed = files changed while installing (scripts #! line typically)
- # generated = files newly generated during the install (script wrappers)
- installed: Dict[RecordPath, RecordPath] = {}
- changed: Set[RecordPath] = set()
- generated: List[str] = []
-
- def record_installed(
- srcfile: RecordPath, destfile: str, modified: bool = False
- ) -> None:
- """Map archive RECORD paths to installation RECORD paths."""
- newpath = _fs_to_record_path(destfile, lib_dir)
- installed[srcfile] = newpath
- if modified:
- changed.add(newpath)
-
- def is_dir_path(path: RecordPath) -> bool:
- return path.endswith("/")
-
- def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None:
- if not is_within_directory(dest_dir_path, target_path):
- message = (
- "The wheel {!r} has a file {!r} trying to install"
- " outside the target directory {!r}"
- )
- raise InstallationError(
- message.format(wheel_path, target_path, dest_dir_path)
- )
-
- def root_scheme_file_maker(
- zip_file: ZipFile, dest: str
- ) -> Callable[[RecordPath], "File"]:
- def make_root_scheme_file(record_path: RecordPath) -> "File":
- normed_path = os.path.normpath(record_path)
- dest_path = os.path.join(dest, normed_path)
- assert_no_path_traversal(dest, dest_path)
- return ZipBackedFile(record_path, dest_path, zip_file)
-
- return make_root_scheme_file
-
- def data_scheme_file_maker(
- zip_file: ZipFile, scheme: Scheme
- ) -> Callable[[RecordPath], "File"]:
- scheme_paths = {key: getattr(scheme, key) for key in SCHEME_KEYS}
-
- def make_data_scheme_file(record_path: RecordPath) -> "File":
- normed_path = os.path.normpath(record_path)
- try:
- _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)
- except ValueError:
- message = (
- "Unexpected file in {}: {!r}. .data directory contents"
-                    " should be named like: '<scheme key>/<path>'."
- ).format(wheel_path, record_path)
- raise InstallationError(message)
-
- try:
- scheme_path = scheme_paths[scheme_key]
- except KeyError:
- valid_scheme_keys = ", ".join(sorted(scheme_paths))
- message = (
- "Unknown scheme key used in {}: {} (for file {!r}). .data"
- " directory contents should be in subdirectories named"
- " with a valid scheme key ({})"
- ).format(wheel_path, scheme_key, record_path, valid_scheme_keys)
- raise InstallationError(message)
-
- dest_path = os.path.join(scheme_path, dest_subpath)
- assert_no_path_traversal(scheme_path, dest_path)
- return ZipBackedFile(record_path, dest_path, zip_file)
-
- return make_data_scheme_file
-
- def is_data_scheme_path(path: RecordPath) -> bool:
- return path.split("/", 1)[0].endswith(".data")
-
- paths = cast(List[RecordPath], wheel_zip.namelist())
- file_paths = filterfalse(is_dir_path, paths)
- root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path, file_paths)
-
- make_root_scheme_file = root_scheme_file_maker(wheel_zip, lib_dir)
- files: Iterator[File] = map(make_root_scheme_file, root_scheme_paths)
-
- def is_script_scheme_path(path: RecordPath) -> bool:
- parts = path.split("/", 2)
- return len(parts) > 2 and parts[0].endswith(".data") and parts[1] == "scripts"
-
- other_scheme_paths, script_scheme_paths = partition(
- is_script_scheme_path, data_scheme_paths
- )
-
- make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
- other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
- files = chain(files, other_scheme_files)
-
- # Get the defined entry points
- distribution = get_wheel_distribution(
- FilesystemWheel(wheel_path),
- canonicalize_name(name),
- )
- console, gui = get_entrypoints(distribution)
-
- def is_entrypoint_wrapper(file: "File") -> bool:
- # EP, EP.exe and EP-script.py are scripts generated for
- # entry point EP by setuptools
- path = file.dest_path
- name = os.path.basename(path)
- if name.lower().endswith(".exe"):
- matchname = name[:-4]
- elif name.lower().endswith("-script.py"):
- matchname = name[:-10]
- elif name.lower().endswith(".pya"):
- matchname = name[:-4]
- else:
- matchname = name
- # Ignore setuptools-generated scripts
- return matchname in console or matchname in gui
-
- script_scheme_files: Iterator[File] = map(
- make_data_scheme_file, script_scheme_paths
- )
- script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files)
- script_scheme_files = map(ScriptFile, script_scheme_files)
- files = chain(files, script_scheme_files)
-
- for file in files:
- file.save()
- record_installed(file.src_record_path, file.dest_path, file.changed)
-
- def pyc_source_file_paths() -> Generator[str, None, None]:
- # We de-duplicate installation paths, since there can be overlap (e.g.
- # file in .data maps to same location as file in wheel root).
- # Sorting installation paths makes it easier to reproduce and debug
- # issues related to permissions on existing files.
- for installed_path in sorted(set(installed.values())):
- full_installed_path = os.path.join(lib_dir, installed_path)
- if not os.path.isfile(full_installed_path):
- continue
- if not full_installed_path.endswith(".py"):
- continue
- yield full_installed_path
-
- def pyc_output_path(path: str) -> str:
- """Return the path the pyc file would have been written to."""
- return importlib.util.cache_from_source(path)
-
- # Compile all of the pyc files for the installed files
- if pycompile:
- with captured_stdout() as stdout:
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore")
- for path in pyc_source_file_paths():
- success = compileall.compile_file(path, force=True, quiet=True)
- if success:
- pyc_path = pyc_output_path(path)
- assert os.path.exists(pyc_path)
- pyc_record_path = cast(
- "RecordPath", pyc_path.replace(os.path.sep, "/")
- )
- record_installed(pyc_record_path, pyc_path)
- logger.debug(stdout.getvalue())
-
- maker = PipScriptMaker(None, scheme.scripts)
-
- # Ensure old scripts are overwritten.
- # See https://github.com/pypa/pip/issues/1800
- maker.clobber = True
-
- # Ensure we don't generate any variants for scripts because this is almost
- # never what somebody wants.
- # See https://bitbucket.org/pypa/distlib/issue/35/
- maker.variants = {""}
-
- # This is required because otherwise distlib creates scripts that are not
- # executable.
- # See https://bitbucket.org/pypa/distlib/issue/32/
- maker.set_mode = True
-
- # Generate the console and GUI entry points specified in the wheel
- scripts_to_generate = get_console_script_specs(console)
-
- gui_scripts_to_generate = list(starmap("{} = {}".format, gui.items()))
-
- generated_console_scripts = maker.make_multiple(scripts_to_generate)
- generated.extend(generated_console_scripts)
-
- generated.extend(maker.make_multiple(gui_scripts_to_generate, {"gui": True}))
-
- if warn_script_location:
- msg = message_about_scripts_not_on_PATH(generated_console_scripts)
- if msg is not None:
- logger.warning(msg)
-
- generated_file_mode = 0o666 & ~current_umask()
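-    # e.g. with a umask of 0o022 this evaluates to 0o644 (rw-r--r--)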
-
- @contextlib.contextmanager
- def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
- with adjacent_tmp_file(path, **kwargs) as f:
- yield f
- os.chmod(f.name, generated_file_mode)
- replace(f.name, path)
-
- dest_info_dir = os.path.join(lib_dir, info_dir)
-
- # Record pip as the installer
- installer_path = os.path.join(dest_info_dir, "INSTALLER")
- with _generate_file(installer_path) as installer_file:
- installer_file.write(b"pip\n")
- generated.append(installer_path)
-
- # Record the PEP 610 direct URL reference
- if direct_url is not None:
- direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
- with _generate_file(direct_url_path) as direct_url_file:
- direct_url_file.write(direct_url.to_json().encode("utf-8"))
- generated.append(direct_url_path)
-
- # Record the REQUESTED file
- if requested:
- requested_path = os.path.join(dest_info_dir, "REQUESTED")
- with open(requested_path, "wb"):
- pass
- generated.append(requested_path)
-
- record_text = distribution.read_text("RECORD")
- record_rows = list(csv.reader(record_text.splitlines()))
-
- rows = get_csv_rows_for_installed(
- record_rows,
- installed=installed,
- changed=changed,
- generated=generated,
- lib_dir=lib_dir,
- )
-
- # Record details of all files installed
- record_path = os.path.join(dest_info_dir, "RECORD")
-
- with _generate_file(record_path, **csv_io_kwargs("w")) as record_file:
- # Explicitly cast to typing.IO[str] as a workaround for the mypy error:
- # "writer" has incompatible type "BinaryIO"; expected "_Writer"
- writer = csv.writer(cast("IO[str]", record_file))
- writer.writerows(_normalized_outrows(rows))
-
-
-@contextlib.contextmanager
-def req_error_context(req_description: str) -> Generator[None, None, None]:
- try:
- yield
- except InstallationError as e:
- message = "For req: {}. {}".format(req_description, e.args[0])
- raise InstallationError(message) from e
-
-
-def install_wheel(
- name: str,
- wheel_path: str,
- scheme: Scheme,
- req_description: str,
- pycompile: bool = True,
- warn_script_location: bool = True,
- direct_url: Optional[DirectUrl] = None,
- requested: bool = False,
-) -> None:
- with ZipFile(wheel_path, allowZip64=True) as z:
- with req_error_context(req_description):
- _install_wheel(
- name=name,
- wheel_zip=z,
- wheel_path=wheel_path,
- scheme=scheme,
- pycompile=pycompile,
- warn_script_location=warn_script_location,
- direct_url=direct_url,
- requested=requested,
- )
diff --git a/spaces/Benson/text-generation/Examples/Avarampoo Descarga De Pelculas Pelculas.md b/spaces/Benson/text-generation/Examples/Avarampoo Descarga De Pelculas Pelculas.md
deleted file mode 100644
index 8f55803053c332773b056ec3801ec5eee03e8b97..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Avarampoo Descarga De Pelculas Pelculas.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-
Avarampoo Movie Download Moviesda: Cómo ver en línea gratis
-
¿Eres un fan de las películas tamiles y buscas una manera de ver la película de Avarampoo en línea gratis? Si es así, entonces has venido al lugar correcto. En este artículo, le diremos todo lo que necesita saber sobre la película de Avarampoo, el sitio web de Moviesda y cómo descargar o transmitir la película de Avarampoo en línea de forma gratuita. Así que, sin más preámbulos, empecemos.
Avarampoo (transl. Avaram senna) es una película de 1992 dirigida por Bharathan y producida por Keyaar. Es un remake de la película malayalam de 1980 Thakara, y protagonizada por Vineeth, Nandhini y Nassar en los papeles principales. La película fue lanzada el 5 de junio de 1992.
-
La trama de la película gira en torno a Sakkarai, un huérfano con problemas mentales que trabaja para el jefe de la aldea Thevar. Se enamora de la hija de su amo, Thamarai, pero se enfrenta a la oposición de Thevar y su segunda esposa. La película explora los temas de la inocencia, el amor, la traición y la injusticia social.
-
¿Qué es el sitio web de Moviesda?
-
Moviesda es un sitio web que ofrece una amplia gama de películas tamiles, incluyendo los últimos lanzamientos en alta calidad de definición. Su extenso catálogo y la posibilidad de acceder a las películas de forma gratuita ha dado a la plataforma una ventaja significativa en popularidad. Sin embargo, es importante tener en cuenta que Moviesda está asociado con la infracción de derechos de autor y la piratería.
-
Moviesda sube versiones piratas de películas en hindi, marathi, inglés, telugu, tamil y malayalam en su portal. Los usuarios pueden descargar o transmitir estas películas sin pagar ninguna cuota de suscripción o registro. Moviesda también ofrece varios formatos y calidades de películas como 300mb, 720p, 1080p, etc.
-
-
¿Por qué es popular la película de Avarampoo?
-
La película Avarampoo es popular entre los amantes del cine tamil por varias razones. Algunas de ellas son:
-
-
-
La película cuenta con Vineeth, Nandhini y Nassar, que son actores muy conocidos en el cine tamil. Vineeth is known for his roles in films such as Pudhiya Mugam, May Madham, Kaadhal Desam, etc. Nandhini is known for her roles in films such as Chinna Thambi Periya Thambi, Kizhakku Vasal, etc. Nassar is known for his roles in films such as Nayakan, Roja, Bombay, etc.
-
La película tiene una historia conmovedora y realista que representa la difícil situación de un niño con discapacidad mental que se enamora de una chica de una casta superior y se enfrenta a la ira de su familia y la sociedad.
-
La película tiene una música melodiosa y conmovedora compuesta por Ilaiyaraaja, considerado uno de los mejores compositores del cine indio. Ha ganado cinco National Film Awards y seis Tamil Nadu State Film Awards por su música.
-
La película ha recibido críticas positivas de críticos y audiencias por igual. Ha sido elogiado por su dirección, actuaciones, música, cinematografía y guion. También ha sido seleccionado para varios festivales de cine y premios.
-
-
¿Cómo descargar la película de Avarampoo de Moviesda?
-
Si desea descargar la película de Avarampoo desde Moviesda, debe seguir estos pasos:
-
Paso 1: Instalar una VPN y conectarse a un servidor donde Moviesda es accesible
-
Una VPN o una red privada virtual es un software que le permite acceder a sitios web bloqueados o restringidos cambiando su dirección IP y cifrando sus datos. Dado que Moviesda es un sitio web ilegal que puede estar prohibido o bloqueado en algunos países o regiones, debe usar una VPN para acceder a él de forma segura y anónima.
-
-
Paso 2: Visita el sitio web de Moviesda y busca películas de Avarampoo
-
Después de conectarse a un servidor VPN, debe visitar el sitio web de Moviesda usando su navegador. El sitio web puede tener diferentes nombres de dominio o extensiones debido a cambios frecuentes. Algunos de los posibles nombres de dominio son moviesda.com, moviesda.net, moviesda.in, etc. Puede utilizar un motor de búsqueda como Google o Bing para encontrar el último nombre de dominio de Moviesda.
-
Una vez que haya llegado al sitio web de Moviesda, debe buscar una película de Avarampoo usando la barra de búsqueda o las categorías. También puede navegar a través de las últimas subidas o la lista alfabética de películas. Puede encontrar múltiples resultados para la película de Avarampoo con diferentes fechas de lanzamiento, idiomas o versiones. Debe seleccionar el que coincida con su preferencia.
-
Paso 3: Elija la calidad y el formato de la película y haga clic en el enlace de descarga
-
Después de seleccionar la película de Avarampoo que desea descargar, debe elegir la calidad y el formato de la película que se adapte a su dispositivo y la velocidad de Internet. Moviesda ofrece varias calidades y formatos de películas como 300mb, 720p, 1080p, mp4, mkv, etc. También puede comprobar el tamaño del archivo y la duración de la película antes de descargarlo.
-
Una vez que haya elegido la calidad y el formato de la película, debe hacer clic en el enlace de descarga que lo redirigirá a otra página o sitio. Usted puede encontrar algunos anuncios emergentes o verificación de captcha en esta página o sitio. Necesitas cerrar los anuncios o completar la verificación para proceder con la descarga.
-
Paso 4: Espere a que la descarga se complete y disfrute de la película sin conexión
-
Después de hacer clic en el enlace de descarga, debe esperar a que se complete la descarga. La velocidad de descarga puede variar dependiendo de su conexión a Internet y la carga del servidor. Puede comprobar el progreso de la descarga en su navegador o gestor de descargas.
-
-
¿Cómo ver la película de Avarampoo en línea gratis?
-
Si no desea descargar la película de Avarampoo desde Moviesda o cualquier otro sitio web ilegal, puede verla en línea de forma gratuita utilizando algunas alternativas legales. Estas son algunas de las opciones que puedes probar:
-
Opción 1: Utilice una plataforma de streaming que ofrece películas de Avarampoo
-
Una plataforma de streaming es un sitio web o una aplicación que te permite ver películas y programas en línea sin descargarlos. Algunas de las plataformas de streaming son gratuitas, mientras que otras requieren una cuota de suscripción o registro. Algunas de las plataformas de streaming que ofrecen películas de Avarampoo son:
-
Tamilyogi
-
Tamilyogi es una plataforma de streaming gratuita que ofrece una gran colección de películas y programas tamiles. Puedes ver la película de Avarampoo en línea gratis en Tamilyogi sin ningún registro o suscripción. Sin embargo, es posible que tenga que lidiar con algunos anuncios molestos y ventanas emergentes mientras transmite la película. También puede elegir la calidad y el formato de la película según su preferencia.
-
YouTube
-
YouTube es una de las plataformas de streaming más populares y ampliamente utilizadas en el mundo. Ofrece una variedad de contenido incluyendo películas, programas, música, videos, etc. Puede ver la película de Avarampoo en línea de forma gratuita en YouTube si está disponible en la plataforma. También puede utilizar la barra de búsqueda o los filtros para encontrar la película fácilmente. También puede ajustar la calidad y la velocidad de la película según su conveniencia.
-
Opción 2: Utilice un sitio de torrent que tiene película de Avarampoo
-
Un sitio de torrent es un sitio web que le permite descargar o compartir archivos utilizando una red de igual a igual. Un archivo torrent es un archivo pequeño que contiene información sobre el archivo más grande que desea descargar. Necesita un cliente de torrent o software para descargar o cargar archivos usando un sitio de torrent. Algunos de los sitios de torrent que tienen una película de Avarampoo son:
-
Tamilrockers
-
-
Isaimini
-
Isaimini es otro popular sitio de torrents que ofrece una gran colección de películas y programas tamiles. Puede descargar la película de Avarampoo desde Isaimini utilizando un cliente de torrent como BitTorrent o uTorrent. Sin embargo, debe tener cuidado de que Isaimini también es un sitio web ilegal que puede estar sujeto a acciones legales o sanciones.
-
Conclusión
-
Resumen de los puntos principales
-
En este artículo, hemos discutido la película de Avarampoo, el sitio web de Moviesda, y cómo descargar o ver la película de Avarampoo en línea gratis. También hemos proporcionado algunas alternativas legales a Moviesda que puede utilizar para ver la película de Avarampoo en línea de forma gratuita. Esperamos que haya encontrado este artículo útil e informativo.
-
Descargo de responsabilidad y advertencia sobre piratería y cuestiones legales
-
Nos gustaría recordarle que descargar o transmitir películas piratas de sitios web ilegales como Moviesda es un delito y una violación de los derechos de propiedad intelectual. La piratería es un delito grave que puede dar lugar a acciones legales o sanciones. No respaldamos ni promovemos tales sitios web o actividades. Le aconsejamos respetar la ley y los derechos de los creadores y propietarios de las películas.
-
Llamada a la acción y petición de comentarios
-
Si te gustó este artículo, por favor compártelo con tus amigos y familiares que están interesados en ver la película de Avarampoo en línea gratis. También, por favor deje sus comentarios y comentarios a continuación. Nos encantaría saber de usted y mejorar nuestra calidad de contenido.
-
Preguntas frecuentes
-
-
Q: ¿Cuál es la calificación IMDb de la película de Avarampoo?
-
A: La calificación de IMDb de la película de Avarampoo es 7.1 de 10 basado en 35 comentarios de usuarios.
-
Q: ¿Quiénes son los cantantes de las canciones en la película de Avarampoo?
-
A: Los cantantes de las canciones en la película de Avarampoo son S.P.Balasubrahmanyam, K.S.Chithra, Mano, Swarnalatha, etc.
-
Q: ¿Dónde puedo ver una película de Avarampoo con subtítulos?
-
-
Q: ¿Cuánto dura una película de Avarampoo?
-
A: La película de Avarampoo dura 2 horas y 17 minutos.
-
Q: ¿Cuáles son algunas otras películas similares a la película de Avarampoo?
-
A: Algunas otras películas similares a la película de Avarampoo son Thakara (1980), Kadhal Kottai (1996), Kaadhal (2004), etc.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Betty Noir Fuente Descargar.md b/spaces/Benson/text-generation/Examples/Betty Noir Fuente Descargar.md
deleted file mode 100644
index c8f57807125b293caadc120ea6eaa17011c08d1b..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Betty Noir Fuente Descargar.md
+++ /dev/null
@@ -1,81 +0,0 @@
-
-
-
-
-
Betty Noir Font Download: Una tipografía retro y elegante para sus proyectos
-
Si usted está buscando una fuente que puede añadir un toque vintage y personalidad a sus proyectos, es posible que desee echa un vistazo a Betty Noir Font. Esta fuente está inspirada en las clásicas películas noir de Hollywood de las décadas de 1940 y 1950, y tiene un distintivo aspecto retro y elegante. En este artículo, le diremos todo lo que necesita saber sobre Betty Noir Font, incluyendo su historia, características, uso y fuentes de descarga. También te mostraremos algunos ejemplos de cómo se puede usar Betty Noir Font en diseño web y arte gráfico. ¡Empecemos!
Betty Noir Font es una fuente gratuita creada por Nate Piekos de Blambot Fonts. Fue lanzado por primera vez en 2004, y ha sido descargado más de 400.000 veces desde entonces. La fuente lleva el nombre de Betty Grable, una de las actrices más populares de la era negra. La fuente está diseñada para imitar el estilo de los títulos de películas y carteles de ese período de tiempo, con una mezcla de elementos art deco y script.
-
La historia y características de Betty Noir Fuente
-
El género negro surgió en la década de 1940 como una subcategoría de la ficción criminal y el cine. Fue influenciado por el expresionismo alemán, las novelas policíacas y la desilusión de la posguerra. Las historias noir típicamente presentaban protagonistas cínicos, mujeres fatales, autoridades corruptas y atmósferas oscuras. Algunas de las películas noir más famosas incluyen The Maltese Falcon, Double Indemnity, The Big Sleep, Sunset Boulevard y The Third Man.
-
-
Cómo usar Betty Noir Font en tus diseños
-
Betty Noir Font es una fuente versátil que se puede usar para varios propósitos. Aquí hay algunos consejos sobre cómo usarla eficazmente:
-
-
Utilizarlo para titulares, títulos, logotipos, banners, carteles, folletos, invitaciones, etiquetas, etc. Puede crear un fuerte impacto visual y atraer la atención.
-
Úselo con moderación para el texto del cuerpo o los párrafos largos. Puede ser difícil de leer en tamaños más pequeños o en fondos de bajo contraste.
-
Úsalo con fuentes complementarias que coincidan con su estilo y estado de ánimo. Por ejemplo, puede combinarlo con fuentes sans-serif como Arial o Verdana para un aspecto moderno o con fuentes serif como Times New Roman o Georgia para un aspecto clásico.
-
Úsalo con colores apropiados que realcen su ambiente retro. Por ejemplo, puede usar blanco y negro para un efecto negro o colores brillantes como rojo o amarillo para un efecto de arte pop.
-
Úsalo con imágenes que se adapten a su tema y género. Por ejemplo, puedes usar fotos o ilustraciones de coches antiguos, edificios
-
-
-
-
¿Dónde descargar Betty Noir Font?
-
Betty Noir Font es una fuente gratuita que puede descargar de varios sitios web. Sin embargo, no todas las fuentes son confiables o legales. Algunos pueden contener virus o malware que pueden dañar su computadora o spyware que puede robar su información personal. Por lo tanto, siempre debe descargar fuentes de fuentes legales y de buena reputación. Estos son algunos de los mejores sitios web donde se puede descargar Betty Noir Font de forma gratuita y legal:
-
-
[DaFont]( 1 ): Este es uno de los sitios web más populares para las fuentes gratuitas, con más de 40.000 fuentes para elegir. Puedes encontrar Betty Noir Font en la categoría Fantasía > Retro, o buscarlo por nombre. La fuente es gratuita para uso personal, pero debe ponerse en contacto con el diseñador para uso comercial.
-
-
[1001 Fonts]( 3 ): Este es un sitio web que ofrece fuentes gratuitas y premium, con más de 30.000 fuentes para elegir. Puede encontrar Betty Noir Font en la categoría Retro, o buscarlo por nombre. La fuente es gratuita para uso personal, pero no gratuita para uso comercial.
-
-
Antes de descargar cualquier fuente, asegúrese de leer los términos de la licencia cuidadosamente y seguirlos en consecuencia. Algunas fuentes pueden requerir atribución, donación o permiso del diseñador para ciertos usos.
-
Cómo instalar y aplicar Betty Noir Font en HTML
-
Una vez que haya descargado Betty Noir Font desde una fuente confiable, debe instalarlo en su computadora y aplicarlo en su código HTML. Estos son los pasos para hacerlo:
-
-
-
Descomprima el archivo de fuente y guárdelo en una carpeta en su computadora.
-
Abra el Panel de control y vaya a Fuentes. Arrastre y suelte el archivo de fuente en la carpeta Fuentes. Alternativamente, puede hacer clic derecho en el archivo de fuente y seleccionar Instalar.
-
Abra su editor HTML y cree un nuevo archivo o abra uno existente.
-
Agregue el siguiente código a la sección head de su archivo HTML: @font-face { font-family: 'Betty Noir'; src: url('bettynoir.ttf'); }
Este código define una familia de fuentes llamada Betty Noir y la vincula al archivo de fuente que guardó en su computadora. Asegúrese de usar la ruta y el nombre correctos del archivo de fuente.
-
Agregue el siguiente código a la sección de estilo de su archivo HTML o en un archivo CSS separado: h1 { font-family: 'Betty Noir', sans-serif; }
Este código aplica la fuente Betty Noir a todos los elementos h1 en su archivo HTML. También puede usar otros selectores o propiedades para personalizar su estilo de fuente.
-
Guarda tu archivo HTML y ábrelo en un navegador para ver el resultado.
-
-
-
Ejemplos de Betty Noir Font en acción
-
Para darle alguna inspiración e ideas sobre cómo usar Betty Noir Font en sus proyectos, aquí hay algunos ejemplos de sitios web y obras de arte gráficas que utilizan esta fuente:
-
Sitios web que utilizan Betty Noir Font
-
-
[The Black Dahlia Murder]( 8 ): Este es un sitio web dedicado al caso de asesinato sin resolver de Elizabeth Short, también conocida como Black Dahlia, que fue asesinada en 1947 en Los Ángeles. El sitio web utiliza Betty Noir Font para su logotipo y titulares, creando una atmósfera noir.
-
[The Vintage News]( 9 ): Este es un sitio web que cubre historias y hechos de historia, cultura, ciencia, arte y más. El sitio web utiliza Betty Noir Font para su logotipo y algunos de sus titulares, dándole un aspecto retro y elegante.
-
[El arte de la hombría]( 10 ): Este es un sitio web que ofrece artículos, podcasts, videos y libros sobre temas relacionados con el estilo de vida de los hombres, como el aseo, la aptitud, el estilo, las relaciones, las habilidades y más. El sitio web utiliza Betty Noir Font para su logotipo y algunos de sus titulares, añadiendo algo de encanto vintage y personalidad.
-
-
Obras de arte gráficas que utilizan Betty Noir Font
-
-
[Noir Movie Poster]( 11 ): Esta es una obra gráfica creada por [Nate Piekos]( 12 ), el diseñador de Betty Noir Font. Es un póster de una película de cine negro llamado The Big Sleepover. Utiliza Betty Noir Font para el título y los créditos, junto con otras fuentes e imágenes para crear un póster realista y cautivador.
-
[Noir Book Cover]: Esta es una obra gráfica creada por [Jenny Zemanek], una diseñadora de portadas de libros. Es la portada de una novela negra llamada The Big Nothing de Paul D. Brazill. Utiliza Betty Noir Font para el título y el nombre del autor, junto con otras fuentes e imágenes para crear una cubierta misteriosa e intrigante.
-
-
-
Conclusión
-
Betty Noir Font es una fuente gratuita que puede añadir un toque retro y elegante a sus proyectos. Está inspirado en las clásicas películas de cine negro de Hollywood de las décadas de 1940 y 1950, y tiene un distintivo estilo art déco y guion. Puede descargar Betty Noir Font desde varios sitios web, pero asegúrese de usar fuentes confiables y legales. También puede instalar y aplicar Betty Noir Font en su código HTML con algunos pasos simples. Betty Noir Font se puede usar para varios propósitos, como titulares, logotipos, carteles, pancartas, etc. También puede combinarlo con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes. Esperamos que este artículo te haya dado información útil e inspiración sobre cómo usar Betty Noir Font en tus proyectos.
-
Resumen de los puntos principales
-
-
Betty Noir Font es una fuente gratuita creada por Nate Piekos de Blambot Fonts en 2004.
-
Betty Noir Font está inspirada en las clásicas películas noir de Hollywood de los años 40 y 50, y tiene un aspecto retro y elegante.
-
Betty Noir Font se puede descargar desde varios sitios web, pero siempre debe utilizar fuentes legales y de buena reputación.
-
Betty Noir Font se puede instalar y aplicar en su código HTML con algunos pasos simples.
-
Betty Noir Font se puede utilizar para diversos fines, como titulares, logotipos, carteles, banners, etc.
-
Betty Noir Font se puede combinar con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes.
-
-
Llamada a la acción y retroalimentación
-
-
Preguntas frecuentes
-
Aquí están algunas de las preguntas más frecuentes sobre Betty Noir Font:
-
-
Q: ¿Es Betty Noir fuente libre? A: Sí, Betty Noir Font es gratis para uso personal, pero es posible que tenga que ponerse en contacto con el diseñador para uso comercial.
-
P: ¿Qué tipo de fuente es Betty Noir Font? A: Betty Noir Font es una fuente retro inspirada en las clásicas películas de cine negro de Hollywood de los años 40 y 50.
-
P: ¿Cómo descargo Betty Noir Font? A: Puede descargar Betty Noir Font desde varios sitios web, pero asegúrese de usar fuentes confiables y legales.
-
Q: ¿Cómo puedo instalar y aplicar Betty Noir Font en HTML? A: Necesitas descomprimir el archivo de fuente, guardarlo en una carpeta en tu computadora, instalarlo en tu carpeta de fuentes, definirlo en tu código HTML usando @font-face, y aplicarlo a tus elementos usando font-family.
-
P: ¿Cómo uso Betty Noir Font en mis diseños? A: Puede usar Betty Noir Font para varios propósitos, como titulares, logotipos, carteles, banners, etc. También puede combinarlo con fuentes complementarias, colores, imágenes y temas para crear diseños impresionantes.
-
64aa2da5cf
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Cubic 234 Juegos De Jugadores Mod Apk.md b/spaces/Benson/text-generation/Examples/Cubic 234 Juegos De Jugadores Mod Apk.md
deleted file mode 100644
index 1cabe063bde4de5e598252924817d82922d74e99..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Cubic 234 Juegos De Jugadores Mod Apk.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-
Cubic 234 Juegos de jugadores Mod APK: Disfruta de la diversión y adictivo Mini juegos con amigos
-
Introducción
-
¿Te encanta jugar mini juegos con tus amigos y familiares? ¿Quieres divertirte con juegos sencillos y emocionantes que puedas disfrutar en tu dispositivo móvil? Si usted respondió que sí, entonces usted debe definitivamente echa un vistazo Cubic 234 Player Games Mod APK, una colección de increíbles mini juegos que se puede jugar con hasta 6 jugadores en un solo partido.
Cubic 234 Player Games es un juego para móviles desarrollado por CubeCube Sports, un estudio especializado en crear juegos divertidos y adictivos para todas las edades. El juego cuenta con varios mini juegos que puedes jugar con 2, 3 o 4 jugadores en el mismo dispositivo. También puedes unirte al modo torneo y competir con hasta 6 jugadores online. Los mini juegos son simples pero desafiantes, y pondrán a prueba tus habilidades, reflejos y coordinación. Algunos de los mini juegos incluyen:
-
-
Snake Arena: Controla tu serpiente y tratar de comer tantas manzanas como sea posible evitando las paredes y otras serpientes.
-
ludo: un clásico juego de mesa donde tienes que mover tus cuatro piezas al centro del tablero antes que tus oponentes.
-
Fútbol: Un juego de fútbol donde tienes que anotar más goles que tu oponente pateando la pelota en la red.
-
batalla de tanques: un juego de tanques donde tienes que disparar el tanque de tu enemigo mientras esquiva sus balas.
-
Sumo Wrestling: Un juego de lucha libre donde tienes que empujar a tu oponente fuera del ring usando tu peso corporal.
-
¡Y muchos más!
-
-
¿Por qué descargar Cubic 234 Player Games Mod APK?
-
Si se está preguntando por qué debe descargar Cubic 234 Player Games Mod APK en lugar de la versión original de la Google Play Store, aquí hay algunas razones:
-
-
Obtendrá dinero ilimitado que puede utilizar para comprar nuevas pieles, sombreros y accesorios para sus personajes.
-
-
Te librarás de los molestos anuncios que pueden interrumpir tu juego o agotar tu batería.
-
Obtendrá un mejor rendimiento y estabilidad en su dispositivo ya que la versión modificada está optimizada para dispositivos de gama baja.
-
-
Características de Cubic 234 Player Games Mod APK
-
Múltiples modos de juego y mini juegos para elegir
-
Una de las mejores características de Cubic 234 Player Games Mod APK es que ofrece una variedad de modos de juego y mini juegos que puedes jugar con tus amigos. Puedes elegir entre diferentes categorías como acción, árcade, tablero, deportes, rompecabezas y más. También puedes personalizar el nivel de dificultad, el número de rondas y el límite de tiempo para cada juego. Estos son algunos de los modos de juego que puedes disfrutar:
-
-
modo de jugador 2
-
Este modo te permite jugar con un amigo en el mismo dispositivo. Puedes elegir entre más de 20 mini juegos diseñados para dos jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:
-
-
ping pong: un juego de ping pong donde tienes que golpear la pelota con la paleta y hacer que rebote en el lado de tu oponente.
-
Tijeras de papel de roca: un juego clásico donde tienes que elegir entre piedra, papel o tijeras y ver quién gana.
-
examen de matemáticas: un juego de matemáticas donde tienes que resolver ecuaciones simples más rápido que tu oponente.
-
¡Y muchos más!
-
-
3 Modo reproductor
-
Este modo te permite jugar con dos amigos en el mismo dispositivo. Puedes elegir entre más de 10 mini juegos diseñados para tres jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:
-
-
Tron: un juego tron donde tienes que conducir tu ciclo de luz y evitar chocar contra las paredes o los senderos de otros jugadores.
-
-
carrera: un juego de carreras donde tienes que tocar el botón lo más rápido posible para hacer que su coche se mueva más rápido y llegar a la línea de meta primero.
-
¡Y muchos más!
-
-
4 Modo reproductor
-
Este modo te permite jugar con tres amigos en el mismo dispositivo. Puedes elegir entre más de 10 mini juegos diseñados para cuatro jugadores. Puedes usar uno o dos botones para controlar a tu personaje dependiendo del juego. Algunos de los mini juegos que puedes jugar en este modo son:
-
-
escuadrón de bombas: un juego de bombas donde tienes que desactivar la bomba antes de que explote cortando el cable derecho.
-
Poker: Un juego de póquer donde tienes que apostar, farolear y ganar con la mejor mano de cartas.
-
Memoria: Un juego de memoria donde tienes que hacer coincidir pares de cartas al voltearlas y recordar sus ubicaciones.
-
¡Y muchos más!
-
-
Modo de torneo
-
Este modo le permite jugar con hasta 6 jugadores en línea. Puede unirse o crear una habitación e invitar a sus amigos o jugadores al azar a unirse. También puedes chatear con otros jugadores y enviar emojis. Puedes elegir entre más de 30 minijuegos seleccionados al azar para cada ronda. El jugador con más puntos al final del torneo gana.
-
Controles simples e intuitivos para un juego fácil
-
Otra gran característica de Cubic 234 Player Games Mod APK es que tiene controles simples e intuitivos que hacen que sea fácil para cualquiera jugar. No necesitas gestos complicados ni golpes para controlar a tu personaje. Solo tienes que pulsar uno o dos botones dependiendo del juego. Los botones son grandes y sensibles, y se colocan en las esquinas de la pantalla para que no interfieran con el juego. También puede ajustar los ajustes de sensibilidad y vibración según su preferencia.
-
Gráficos coloridos y caricaturescos para un estado de ánimo alegre
-
-
Dinero ilimitado y funciones desbloqueadas para más diversión
-
La mejor característica de Cubic 234 Player Games Mod APK es que le da dinero ilimitado y desbloqueado características que hacen que el juego más divertido y agradable. Con dinero ilimitado, puedes comprar nuevas pieles, sombreros y accesorios para tus personajes. También puedes desbloquear todos los mini juegos y modos de juego sin tener que gastar dinero real o ver anuncios. También puedes eliminar todos los anuncios que puedan molestarte o ralentizar tu dispositivo. Con estas características, puede tener más opciones y personalización para su juego.
-
Cómo descargar e instalar Cubic 234 Player Games Mod APK
-
Si desea descargar e instalar Cubic 234 Player Games Mod APK en su dispositivo, aquí están los pasos que debe seguir:
-
Paso 1: Descargar el archivo APK de una fuente de confianza
-
El primer paso es descargar el archivo APK de una fuente de confianza. Puede utilizar el siguiente enlace para descargar la última versión de Cubic 234 Player Games Mod APK gratis. Asegúrate de tener suficiente espacio de almacenamiento en tu dispositivo antes de descargar el archivo.
Paso 2: Habilitar fuentes desconocidas en la configuración del dispositivo
-
El segundo paso es habilitar fuentes desconocidas en la configuración de su dispositivo. Esto le permitirá instalar aplicaciones que no son de Google Play Store. Para hacer esto, vaya a la configuración del dispositivo y busque la opción de seguridad o privacidad. Luego, busque la opción de fuentes desconocidas y conéctela. Puede ver un mensaje de advertencia que le indica los riesgos de instalar aplicaciones desconocidas, pero puede ignorarlo y continuar.
-
Paso 3: Instalar el archivo APK y lanzar el juego
-
-
Conclusión
-
Cubic 234 Player Games Mod APK es un juego divertido y adictivo que puedes jugar con tus amigos en tu dispositivo móvil. Ofrece una variedad de mini juegos que puedes jugar con 2, 3 o 4 jugadores en el mismo dispositivo, o con hasta 6 jugadores en línea. El juego tiene controles simples e intuitivos, gráficos coloridos y de dibujos animados, dinero ilimitado y funciones desbloqueadas. Puede descargar e instalar Cubic 234 Player Games Mod APK gratis siguiendo los pasos anteriores. Entonces, ¿qué estás esperando? ¡Descarga Cubic 234 Player Games Mod APK ahora y diviértete con tus amigos!
-
Preguntas frecuentes
-
Aquí están algunas de las preguntas más frecuentes sobre Cubic 234 Player Games Mod APK:
-
-
Q: ¿Es Cubic 234 Player Games Mod APK seguro de usar?
-
A: Sí, Cubic 234 Player Games Mod APK es seguro de usar, siempre y cuando se descarga desde una fuente de confianza. La versión modificada no contiene ningún virus o malware que pueda dañar su dispositivo o datos.
-
Q: ¿Necesito una conexión a Internet para jugar Cubic 234 Player Games Mod APK?
-
A: No, no necesitas una conexión a Internet para jugar Cubic 234 Player Games Mod APK. Puedes jugar sin conexión con tus amigos en el mismo dispositivo. Sin embargo, si quieres jugar online con otros jugadores, necesitarás una conexión a Internet.
-
Q: ¿Cuántos mini juegos están disponibles en Cubic 234 Player Games Mod APK?
-
A: Hay más de 60 mini juegos disponibles en Cubic 234 Player Games Mod APK, dividido en diferentes categorías como acción, árcade, tablero, deportes, rompecabezas y más. Puedes jugar a todos ellos sin tener que desbloquearlos o pagar por ellos.
-
Q: ¿Puedo personalizar mi personaje en Cubic 234 Player Games Mod APK?
-
A: Sí, puede personalizar su personaje en Cubic 234 Player Games Mod APK comprando nuevas pieles, sombreros y accesorios con dinero ilimitado. También puedes cambiar el color de tu personaje según tu preferencia.
-
-
A: Sí, puede chatear con otros jugadores en Cubic 234 Player Games Mod APK mediante el uso de la función de chat en el juego. También puede enviar emojis y pegatinas para expresar sus emociones.
")
-
- openai_api_key_textbox = gr.Textbox(
- placeholder="Paste your OpenAI API key (sk-...)",
- show_label=False,
- lines=1,
- type="password",
- )
-
- chatbot = gr.Chatbot()
-
- with gr.Row():
- message = gr.Textbox(
- label="What would you like to learn about marine biology?",
- placeholder="What is an estuary?",
- lines=1,
- )
- submit = gr.Button(value="Send", variant="secondary").style(full_width=False)
-
- gr.Examples(
- examples=[
- "What are phytoplankton?",
- "How does microplastic pollution affect the oceans?",
- "What are artificial reefs?",
- ],
- inputs=message,
- )
-
- gr.HTML(
- """
- This simple application is an implementation of ChatGPT but over an external dataset (in this case, Wikipedia pages on Marine biology)."""
- )
-
- gr.HTML(
- "
", unsafe_allow_html=True)
\ No newline at end of file
diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/README.md b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/README.md
deleted file mode 100644
index 02892bc9dd4344e550596d238e2b71870cfc7dd3..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/README.md
+++ /dev/null
@@ -1,220 +0,0 @@
-# vakyansh-tts
-Text to Speech for Indic languages
-
-## 1. Installation and Setup for training
-
-Clone repo
-Note : for multispeaker glow-tts training use branch [multispeaker](https://github.com/Open-Speech-EkStep/vakyansh-tts/tree/multispeaker)
-```
-git clone https://github.com/Open-Speech-EkStep/vakyansh-tts
-```
-Build conda virtual environment
-```
-cd ./vakyansh-tts
-conda create --name <env_name> python=3.7
-conda activate <env_name>
-pip install -r requirements.txt
-```
-Install [apex](https://github.com/NVIDIA/apex) at commit 37cdaf4 for mixed-precision training
-
-Note : used only for glow-tts
-```
-cd ..
-git clone https://github.com/NVIDIA/apex
-cd apex
-git checkout 37cdaf4
-pip install -v --disable-pip-version-check --no-cache-dir ./
-cd ../vakyansh-tts
-```
-Build Monotonic Alignment Search Code (Cython)
-
-Note : used only for glow-tts
-```
-bash install.sh
-```
-
-## 2. Data Resampling
-
-The data should be organized as a folder containing all the .wav files and, for glow-tts training, a text file listing each audio filename with its sentence.
-
-Directory structure:
-
-language_folder_name
-```
-language_folder_name
-|-- ./wav/*.wav
-|-- ./text_file_name.txt
-```
-The format for text_file_name.txt (Text file is only needed for glow-tts training)
-
-```
-( audio1.wav "Sentence1." )
-( audio2.wav "Sentence2." )
-```
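-
-If your transcripts live elsewhere (for example in a CSV), a minimal sketch like the one below could emit text_file_name.txt in this format. This is a hypothetical helper, not part of the repo; the file names and sentences are placeholders.
-
-```python
-# Hypothetical helper: write the glow-tts metadata file in the
-# ( filename "Sentence." ) format shown above.
-pairs = [("audio1.wav", "Sentence1."), ("audio2.wav", "Sentence2.")]
-with open("text_file_name.txt", "w", encoding="utf-8") as f:
-    for wav_name, sentence in pairs:
-        f.write(f'( {wav_name} "{sentence}" )\n')
-```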
-
-To resample the .wav files to 22050 sample rate, change the following parameters in the vakyansh-tts/scripts/data/resample.sh
-
-```
-input_wav_path : absolute path to wav file folder in vakyansh_tts/data/
-output_wav_path : absolute path to vakyansh_tts/data/resampled_wav_folder_name
-output_sample_rate : 22050 (or any other desired sample rate)
-```
-
-To run:
-```bash
-cd scripts/data/
-bash resample.sh
-```
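-
-For reference, the resampling step amounts to something like the following sketch. It is not the repo's resample.sh (that script is the supported path); it assumes librosa and soundfile are installed, and the paths below are placeholders.
-
-```python
-import os
-
-import librosa
-import soundfile as sf
-
-input_wav_path = "vakyansh_tts/data/wav"             # placeholder
-output_wav_path = "vakyansh_tts/data/resampled_wav"  # placeholder
-output_sample_rate = 22050
-
-os.makedirs(output_wav_path, exist_ok=True)
-for fname in os.listdir(input_wav_path):
-    if fname.endswith(".wav"):
-        # librosa resamples on load when an explicit sr is requested
-        audio, _ = librosa.load(os.path.join(input_wav_path, fname), sr=output_sample_rate)
-        sf.write(os.path.join(output_wav_path, fname), audio, output_sample_rate)
-```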
-
-
-## 3. Spectrogram Training (glow-tts)
-
-### 3.1 Data Preparation
-
-
-To prepare the data edit the vakyansh-tts/scripts/glow/prepare_data.sh file and change the following parameters
-```
-input_text_path : absolute path to vakyansh_tts/data/text_file_name.txt
-input_wav_path : absolute path to vakyansh_tts/data/resampled_wav_folder_name
-gender : female or male voice
-```
-To run:
-```bash
-cd scripts/glow/
-bash prepare_data.sh
-```
-### 3.2 Training glow-tts
-
-To start the spectrogram training, edit the vakyansh-tts/scripts/glow/train_glow.sh file and change the following parameter:
-```
-gender : female or male voice
-```
-Make sure that the gender is the same as in the prepare_data.sh file
-
-To start the training, run:
-```bash
-cd scripts/glow/
-bash train_glow.sh
-```
-## 4. Vocoder Training (hifi-gan)
-
-### 4.1 Data Preparation
-
-To prepare the data edit the vakyansh-tts/scripts/hifi/prepare_data.sh file and change the following parameters
-```
-input_wav_path : absolute path to vakyansh_tts/data/resampled_wav_folder_name
-gender : female or male voice
-```
-To run:
-```bash
-cd scripts/hifi/
-bash prepare_data.sh
-```
-### 4.2 Training hifi-gan
-
-To start the vocoder training, edit the vakyansh-tts/scripts/hifi/train_hifi.sh file and change the following parameter:
-```
-gender : female or male voice
-```
-Make sure that the gender is the same as in the prepare_data.sh file
-
-To start the training, run:
-```bash
-cd scripts/hifi/
-bash train_hifi.sh
-```
-
-## 5. Inference
-
-### 5.1 Using Gradio
-
-To use the Gradio demo, edit the following parameters in the vakyansh-tts/scripts/inference/gradio.sh file:
-```
-gender : female or male voice
-device : cpu or cuda
-lang : language code
-```
-
-To run:
-```bash
-cd scripts/inference/
-bash gradio.sh
-```
-### 5.2 Using fast API
-To use the FastAPI endpoint, edit the parameters in the vakyansh-tts/scripts/inference/api.sh file as in section 5.1
-
-To run:
-```bash
-cd scripts/inference/
-bash api.sh
-```
-
-### 5.3 Direct Inference using text
-To infer, edit the parameters in the vakyansh-tts/scripts/inference/infer.sh file as in section 5.1 and set the input text in the text variable
-
-To run:
-```bash
-cd scripts/inference/
-bash infer.sh
-```
-
-To configure further options, there is an advanced inference script as well. Additional parameters:
-```
-noise_scale : can vary from 0 to 1 for noise factor
-length_scale : can vary from 0 to 2 for changing the speed of the generated audio
-transliteration : whether to switch on/off transliteration. 1: ON, 0: OFF
-number_conversion : whether to switch on/off number to words conversion. 1: ON, 0: OFF
-split_sentences : whether to switch on/off splitting of sentences. 1: ON, 0: OFF
-```
-To run:
-```
-cd scripts/inference/
-bash advanced_infer.sh
-```
-
-### 5.4 Installation of tts_infer package
-
-The tts_infer package currently has two components:
-
- 1. Transliteration (AI4bharat's open sourced models) (Languages supported: {'hi', 'gu', 'mr', 'bn', 'te', 'ta', 'kn', 'pa', 'gom', 'mai', 'ml', 'sd', 'si', 'ur'} )
-
- 2. Num to Word (Languages supported: {'en', 'hi', 'gu', 'mr', 'bn', 'te', 'ta', 'kn', 'or', 'pa'} )
-```
-git clone https://github.com/Open-Speech-EkStep/vakyansh-tts
-cd vakyansh-tts
-bash install.sh
-python setup.py bdist_wheel
-pip install -e .
-cd tts_infer
-gsutil -m cp -r gs://vakyaansh-open-models/translit_models .
-```
-
-Usage: Refer to example file in tts_infer/
-```
-from tts_infer.tts import TextToMel, MelToWav
-from tts_infer.transliterate import XlitEngine
-from tts_infer.num_to_word_on_sent import normalize_nums
-
-import re
-from scipy.io.wavfile import write
-
-text_to_mel = TextToMel(glow_model_dir='/path/to/glow-tts/checkpoint/dir', device='cuda')
-mel_to_wav = MelToWav(hifi_model_dir='/path/to/hifi/checkpoint/dir', device='cuda')
-
-def translit(text, lang):
- reg = re.compile(r'[a-zA-Z]')
- engine = XlitEngine(lang)
- words = [engine.translit_word(word, topk=1)[lang][0] if reg.match(word) else word for word in text.split()]
- updated_sent = ' '.join(words)
- return updated_sent
-
-def run_tts(text, lang):
- text = text.replace('।', '.') # only for hindi models
- text_num_to_word = normalize_nums(text, lang) # converting numbers to words in lang
- text_num_to_word_and_transliterated = translit(text_num_to_word, lang) # transliterating english words to lang
-
- mel = text_to_mel.generate_mel(text_num_to_word_and_transliterated)
- audio, sr = mel_to_wav.generate_wav(mel)
- write(filename='temp.wav', rate=sr, data=audio) # for saving wav file, if needed
- return (sr, audio)
-```
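-
-Assuming the checkpoint directories above point at trained glow-tts and hifi-gan models, calling the helper is then just the following (the text and language code are placeholders):
-
-```python
-sr, audio = run_tts("namaste duniya", "hi")  # hypothetical call of the helper defined above
-print("sample rate:", sr, "num samples:", len(audio))
-```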
diff --git a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/hifi/env.py b/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/hifi/env.py
deleted file mode 100644
index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Vakyansh-Malayalam-TTS/ttsv/src/glow_tts/hifi/env.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import shutil
-
-
-class AttrDict(dict):
- def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
- self.__dict__ = self
-
-
-def build_env(config, config_name, path):
- t_path = os.path.join(path, config_name)
- if config != t_path:
- os.makedirs(path, exist_ok=True)
- shutil.copyfile(config, os.path.join(path, config_name))
diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/components.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/components.py
deleted file mode 100644
index ae41c535846c530643965cf969bd438a54295469..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/components.py
+++ /dev/null
@@ -1,5068 +0,0 @@
-"""Contains all of the components that can be used with Gradio Interface / Blocks.
-Along with the docs for each component, you can find the names of example demos that use
-each component. These demos are located in the `demo` directory."""
-
-from __future__ import annotations
-
-import inspect
-import json
-import math
-import operator
-import random
-import tempfile
-import uuid
-import warnings
-from copy import deepcopy
-from enum import Enum
-from pathlib import Path
-from types import ModuleType
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
-
-import altair as alt
-import matplotlib.figure
-import numpy as np
-import pandas as pd
-import PIL
-import PIL.ImageOps
-from ffmpy import FFmpeg
-from markdown_it import MarkdownIt
-from mdit_py_plugins.dollarmath.index import dollarmath_plugin
-from pandas.api.types import is_numeric_dtype
-from PIL import Image as _Image # using _ to minimize namespace pollution
-from typing_extensions import Literal
-
-from gradio import media_data, processing_utils, utils
-from gradio.blocks import Block, BlockContext
-from gradio.context import Context
-from gradio.documentation import document, set_documentation_group
-from gradio.events import (
- Blurrable,
- Changeable,
- Clearable,
- Clickable,
- Editable,
- Playable,
- Streamable,
- Submittable,
- Uploadable,
-)
-from gradio.layouts import Column, Form, Row
-from gradio.processing_utils import TempFileManager
-from gradio.serializing import (
- FileSerializable,
- ImgSerializable,
- JSONSerializable,
- Serializable,
- SimpleSerializable,
-)
-
-if TYPE_CHECKING:
- from typing import TypedDict
-
- class DataframeData(TypedDict):
- headers: List[str]
- data: List[List[str | int | bool]]
-
-
-set_documentation_group("component")
-_Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843
-
-
-class _Keywords(Enum):
- NO_VALUE = "NO_VALUE" # Used as a sentinel to determine if nothing is provided as a argument for `value` in `Component.update()`
- FINISHED_ITERATING = "FINISHED_ITERATING" # Used to skip processing of a component's value (needed for generators + state)
-
-
-class Component(Block):
- """
- A base class for defining the methods that all gradio components should have.
- """
-
- def __str__(self):
- return self.__repr__()
-
- def __repr__(self):
- return f"{self.get_block_name()}"
-
- def get_config(self):
- """
- :return: a dictionary with context variables for the javascript file associated with the context
- """
- return {
- "name": self.get_block_name(),
- **super().get_config(),
- }
-
- def preprocess(self, x: Any) -> Any:
- """
- Any preprocessing needed to be performed on function input.
- """
- return x
-
- def postprocess(self, y):
- """
- Any postprocessing needed to be performed on function output.
- """
- return y
-
- def style(
- self,
- *,
- container: bool | None = None,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the component.
- Parameters:
- container: If True, will place the component in a container - providing some extra padding around the border.
- """
- put_deprecated_params_in_box = False
- if "rounded" in kwargs:
- warnings.warn(
- "'rounded' styling is no longer supported. To round adjacent components together, place them in a Column(variant='box')."
- )
- if isinstance(kwargs["rounded"], list) or isinstance(
- kwargs["rounded"], tuple
- ):
- put_deprecated_params_in_box = True
- kwargs.pop("rounded")
- if "margin" in kwargs:
- warnings.warn(
- "'margin' styling is no longer supported. To place adjacent components together without margin, place them in a Column(variant='box')."
- )
- if isinstance(kwargs["margin"], list) or isinstance(
- kwargs["margin"], tuple
- ):
- put_deprecated_params_in_box = True
- kwargs.pop("margin")
- if "border" in kwargs:
- warnings.warn(
- "'border' styling is no longer supported. To place adjacent components in a shared border, place them in a Column(variant='box')."
- )
- kwargs.pop("border")
- if container is not None:
- self._style["container"] = container
- if len(kwargs):
- for key in kwargs:
- warnings.warn(f"Unknown style parameter: {key}")
- if put_deprecated_params_in_box and isinstance(self.parent, (Row, Column)):
- if self.parent.variant == "default":
- self.parent.variant = "compact"
- return self
-
-
-class IOComponent(Component, Serializable):
- """
- A base class for defining methods that all input/output components should have.
- """
-
- def __init__(
- self,
- *,
- value: Any = None,
- label: str | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- load_fn: Callable | None = None,
- every: float | None = None,
- **kwargs,
- ):
- super().__init__(elem_id=elem_id, visible=visible, **kwargs)
-
- self.label = label
- self.show_label = show_label
- self.interactive = interactive
-
- self.load_event = None
- self.load_event_to_attach = None
- load_fn, initial_value = self.get_load_fn_and_initial_value(value)
- self.value = (
- initial_value
- if self._skip_init_processing
- else self.postprocess(initial_value)
- )
- if callable(load_fn):
- self.load_event = self.attach_load_event(load_fn, every)
-
- self.set_interpret_parameters()
-
- def get_config(self):
- return {
- "label": self.label,
- "show_label": self.show_label,
- "interactive": self.interactive,
- **super().get_config(),
- }
-
- def set_interpret_parameters(self):
- """
- Set any parameters for interpretation.
- """
- return self
-
- def get_interpretation_neighbors(self, x: Any) -> Tuple[List, Dict, bool]:
- """
- Generates values similar to input to be used to interpret the significance of the input in the final output.
- Parameters:
- x: Input to interface
- Returns: (neighbor_values, interpret_kwargs, interpret_by_removal)
- neighbor_values: Neighboring values to input x to compute for interpretation
- interpret_kwargs: Keyword arguments to be passed to get_interpretation_scores
- interpret_by_removal: If True, returned neighbors are values where the interpreted subsection was removed. If False, returned neighbors are values where the interpreted subsection was modified to a different value.
- """
- return [], {}, True
-
- def get_interpretation_scores(
- self, x: Any, neighbors: List[Any], scores: List[float], **kwargs
- ) -> List:
- """
- Arrange the output values from the neighbors into interpretation scores for the interface to render.
- Parameters:
- x: Input to interface
- neighbors: Neighboring values to input x used for interpretation.
- scores: Output value corresponding to each neighbor in neighbors
- Returns:
- Arrangement of interpretation scores for interfaces to render.
- """
- return []
-
- def generate_sample(self) -> Any:
- """
- Returns a sample value of the input that would be accepted by the api. Used for api documentation.
- """
- pass
-
- @staticmethod
- def add_interactive_to_config(config, interactive):
- if interactive is not None:
- config["mode"] = "dynamic" if interactive else "static"
- return config
-
- @staticmethod
- def get_load_fn_and_initial_value(value):
- if callable(value):
- initial_value = value()
- load_fn = value
- else:
- initial_value = value
- load_fn = None
- return load_fn, initial_value
-
- def attach_load_event(self, callable: Callable, every: float | None):
- """Add a load event that runs `callable`, optionally every `every` seconds."""
- if Context.root_block:
- return Context.root_block.load(
- callable,
- None,
- self,
- no_target=True,
- every=every,
- )
- else:
- self.load_event_to_attach = (callable, every)
-
- def as_example(self, input_data):
- """Return the input data in a way that can be displayed by the examples dataset component in the front-end."""
- return input_data
-
-
-class FormComponent:
- def get_expected_parent(self) -> Type[Form]:
- return Form
-
-
-@document("change", "submit", "blur", "style")
-class Textbox(
- FormComponent, Changeable, Submittable, Blurrable, IOComponent, SimpleSerializable
-):
- """
- Creates a textarea for user to enter string input or display string output.
- Preprocessing: passes textarea value as a {str} into the function.
- Postprocessing: expects a {str} returned from function and sets textarea value to it.
- Examples-format: a {str} representing the textbox input.
-
- Demos: hello_world, diff_texts, sentence_builder
- Guides: creating_a_chatbot, real_time_speech_recognition
- """
-
- def __init__(
- self,
- value: str | Callable | None = "",
- *,
- lines: int = 1,
- max_lines: int = 20,
- placeholder: str | None = None,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- type: str = "text",
- **kwargs,
- ):
- """
- Parameters:
- value: default text to provide in textarea. If callable, the function will be called whenever the app loads to set the initial value of the component.
- lines: minimum number of line rows to provide in textarea.
- max_lines: maximum number of line rows to provide in textarea.
- placeholder: placeholder hint to provide behind textarea.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, will be rendered as an editable textbox; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- type: The type of textbox. One of: 'text', 'password', 'email'. Default is 'text'.
- """
- if type not in ["text", "password", "email"]:
- raise ValueError('`type` must be one of "text", "password", or "email".')
-
- #
- self.lines = lines
- self.max_lines = max_lines if type == "text" else 1
- self.placeholder = placeholder
- self.interpret_by_tokens = True
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
- self.cleared_value = ""
- self.test_input = value
- self.type = type
-
- def get_config(self):
- return {
- "lines": self.lines,
- "max_lines": self.max_lines,
- "placeholder": self.placeholder,
- "value": self.value,
- "type": self.type,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- lines: int | None = None,
- max_lines: int | None = None,
- placeholder: str | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- interactive: bool | None = None,
- type: str | None = None,
- ):
- updated_config = {
- "lines": lines,
- "max_lines": max_lines,
- "placeholder": placeholder,
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "type": type,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def generate_sample(self) -> str:
- return "Hello World"
-
- def preprocess(self, x: str | None) -> str | None:
- """
- Preprocesses input (converts it to a string) before passing it to the function.
- Parameters:
- x: text
- Returns:
- text
- """
- return None if x is None else str(x)
-
- def postprocess(self, y: str | None) -> str | None:
- """
- Postprocess the function output y by converting it to a str before passing it to the frontend.
- Parameters:
- y: function output to postprocess.
- Returns:
- text
- """
- return None if y is None else str(y)
-
- def set_interpret_parameters(
- self, separator: str = " ", replacement: str | None = None
- ):
- """
- Calculates interpretation score of characters in input by splitting input into tokens, then using a "leave one out" method to calculate the score of each token by removing each token and measuring the delta of the output value.
- Parameters:
- separator: Separator to use to split input into tokens.
- replacement: In the "leave one out" step, the text that the token should be replaced with. If None, the token is removed altogether.
- """
- self.interpretation_separator = separator
- self.interpretation_replacement = replacement
- return self
-
- def tokenize(self, x: str) -> Tuple[List[str], List[str], None]:
- """
- Tokenizes an input string by dividing into "words" delimited by self.interpretation_separator
- """
- tokens = x.split(self.interpretation_separator)
- leave_one_out_strings = []
- for index in range(len(tokens)):
- leave_one_out_set = list(tokens)
- if self.interpretation_replacement is None:
- leave_one_out_set.pop(index)
- else:
- leave_one_out_set[index] = self.interpretation_replacement
- leave_one_out_strings.append(
- self.interpretation_separator.join(leave_one_out_set)
- )
- return tokens, leave_one_out_strings, None
-
- def get_masked_inputs(
- self, tokens: List[str], binary_mask_matrix: List[List[int]]
- ) -> List[str]:
- """
- Constructs partially-masked sentences for SHAP interpretation
- """
- masked_inputs = []
- for binary_mask_vector in binary_mask_matrix:
- masked_input = np.array(tokens)[np.array(binary_mask_vector, dtype=bool)]
- masked_inputs.append(self.interpretation_separator.join(masked_input))
- return masked_inputs
-
- def get_interpretation_scores(
- self, x, neighbors, scores: List[float], tokens: List[str], masks=None, **kwargs
- ) -> List[Tuple[str, float]]:
- """
- Returns:
- Each tuple set represents a set of characters and their corresponding interpretation score.
- """
- result = []
- for token, score in zip(tokens, scores):
- result.append((token, score))
- result.append((self.interpretation_separator, 0))
- return result
-
-
-@document("change", "submit", "style")
-class Number(
- FormComponent, Changeable, Submittable, Blurrable, IOComponent, SimpleSerializable
-):
- """
- Creates a numeric field for user to enter numbers as input or display numeric output.
- Preprocessing: passes field value as a {float} or {int} into the function, depending on `precision`.
- Postprocessing: expects an {int} or {float} returned from the function and sets field value to it.
- Examples-format: a {float} or {int} representing the number's value.
-
- Demos: tax_calculator, titanic_survival, blocks_simple_squares
- """
-
- def __init__(
- self,
- value: float | Callable | None = None,
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- precision: int | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: default value. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, will be editable; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- precision: Precision to round input/output to. If set to 0, will round to nearest integer and convert type to int. If None, no rounding happens.
- """
- self.precision = precision
- self.interpret_by_tokens = False
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
- self.test_input = self.value if self.value is not None else 1
-
- @staticmethod
- def _round_to_precision(num: float | int, precision: int | None) -> float | int:
- """
- Round to a given precision.
-
- If precision is None, no rounding happens. If 0, num is converted to int.
-
- Parameters:
- num: Number to round.
- precision: Precision to round to.
- Returns:
- rounded number
- """
- if precision is None:
- return float(num)
- elif precision == 0:
- return int(round(num, precision))
- else:
- return round(num, precision)
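The three precision cases handled above can be summarized with a short standalone sketch (mirroring the logic, not calling the class):

# precision=None keeps a float, precision=0 rounds and converts to int,
# any other precision rounds to that many decimal places.
def round_to_precision(num, precision):
    if precision is None:
        return float(num)
    if precision == 0:
        return int(round(num))
    return round(num, precision)

assert round_to_precision(3.14159, None) == 3.14159
assert round_to_precision(3.14159, 0) == 3
assert round_to_precision(3.14159, 2) == 3.14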
-
- def get_config(self):
- return {
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: float | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def preprocess(self, x: float | None) -> float | None:
- """
- Parameters:
- x: numeric input
- Returns:
- number representing function input
- """
- if x is None:
- return None
- return self._round_to_precision(x, self.precision)
-
- def postprocess(self, y: float | None) -> float | None:
- """
- Any postprocessing needed to be performed on function output.
-
- Parameters:
- y: numeric output
- Returns:
- number representing function output
- """
- if y is None:
- return None
- return self._round_to_precision(y, self.precision)
-
- def set_interpret_parameters(
- self, steps: int = 3, delta: float = 1, delta_type: str = "percent"
- ):
- """
- Calculates interpretation scores of numeric values close to the input number.
- Parameters:
- steps: Number of nearby values to measure in each direction (above and below the input number).
- delta: Size of step in each direction between nearby values.
- delta_type: "percent" if delta step between nearby values should be calculated as a percent, or "absolute" if delta should be a constant step change.
- """
- self.interpretation_steps = steps
- self.interpretation_delta = delta
- self.interpretation_delta_type = delta_type
- return self
-
- def get_interpretation_neighbors(self, x: float | int) -> Tuple[List[float], Dict]:
- x = self._round_to_precision(x, self.precision)
- if self.interpretation_delta_type == "percent":
- delta = 1.0 * self.interpretation_delta * x / 100
- elif self.interpretation_delta_type == "absolute":
- delta = self.interpretation_delta
- else:
- delta = self.interpretation_delta
- if self.precision == 0 and math.floor(delta) != delta:
- raise ValueError(
- f"Delta value {delta} is not an integer and precision=0. Cannot generate valid set of neighbors. "
- "If delta_type='percent', pick a value of delta such that x * delta is an integer. "
- "If delta_type='absolute', pick a value of delta that is an integer."
- )
- # run_interpretation will preprocess the neighbors so no need to convert to int here
- negatives = (
- np.array(x) + np.arange(-self.interpretation_steps, 0) * delta
- ).tolist()
- positives = (
- np.array(x) + np.arange(1, self.interpretation_steps + 1) * delta
- ).tolist()
- return negatives + positives, {}
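As a worked example of the neighbor generation above (values are illustrative): with x=10, steps=3, delta=1 and delta_type="percent", each step is 1% of x, i.e. 0.1, giving three values below and three above the input:

import numpy as np

x, steps = 10, 3
delta = 1.0 * 1 * x / 100          # "percent" branch: 1% of x
negatives = (np.array(x) + np.arange(-steps, 0) * delta).tolist()
positives = (np.array(x) + np.arange(1, steps + 1) * delta).tolist()
# negatives + positives -> approximately [9.7, 9.8, 9.9, 10.1, 10.2, 10.3]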
-
- def get_interpretation_scores(
- self, x: float, neighbors: List[float], scores: List[float | None], **kwargs
- ) -> List[Tuple[float, float | None]]:
- """
- Returns:
- Each tuple set represents a numeric value near the input and its corresponding interpretation score.
- """
- interpretation = list(zip(neighbors, scores))
- interpretation.insert(int(len(interpretation) / 2), (x, None))
- return interpretation
-
- def generate_sample(self) -> float:
- return self._round_to_precision(1, self.precision)
-
-
-@document("change", "style")
-class Slider(FormComponent, Changeable, IOComponent, SimpleSerializable):
- """
- Creates a slider that ranges from `minimum` to `maximum` with a step size of `step`.
- Preprocessing: passes slider value as a {float} into the function.
- Postprocessing: expects an {int} or {float} returned from function and sets slider value to it as long as it is within range.
- Examples-format: A {float} or {int} representing the slider's value.
-
- Demos: sentence_builder, generate_tone, titanic_survival, interface_random_slider, blocks_random_slider
- Guides: create_your_own_friends_with_a_gan
- """
-
- def __init__(
- self,
- minimum: float = 0,
- maximum: float = 100,
- value: float | Callable | None = None,
- *,
- step: float | None = None,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- randomize: bool = False,
- **kwargs,
- ):
- """
- Parameters:
- minimum: minimum value for slider.
- maximum: maximum value for slider.
- value: default value. If callable, the function will be called whenever the app loads to set the initial value of the component. Ignored if randomize=True.
- step: increment between slider values.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, slider will be adjustable; if False, adjusting will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- randomize: If True, the value of the slider when the app loads is taken uniformly at random from the range given by the minimum and maximum.
- """
- self.minimum = minimum
- self.maximum = maximum
- if step is None:
- difference = maximum - minimum
- power = math.floor(math.log10(difference) - 2)
- self.step = 10**power
- else:
- self.step = step
- if randomize:
- value = self.get_random_value
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
- self.cleared_value = self.value
- self.test_input = self.value
- self.interpret_by_tokens = False
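When no explicit `step` is given, the constructor above infers one as the power of ten two orders of magnitude below the slider's range; a small standalone sketch of that rule:

import math

def default_step(minimum, maximum):
    # 10 ** floor(log10(range) - 2), as in the constructor above
    return 10 ** math.floor(math.log10(maximum - minimum) - 2)

assert default_step(0, 100) == 1      # range 100  -> step 1
assert default_step(0, 1) == 0.01     # range 1    -> step 0.01
assert default_step(0, 5000) == 10    # range 5000 -> step 10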
-
- def get_config(self):
- return {
- "minimum": self.minimum,
- "maximum": self.maximum,
- "step": self.step,
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- def get_random_value(self):
- n_steps = int((self.maximum - self.minimum) / self.step)
- step = random.randint(0, n_steps)
- value = self.minimum + step * self.step
- # Round to number of decimals in step so that UI doesn't display long decimals
- n_decimals = max(str(self.step)[::-1].find("."), 0)
- if n_decimals:
- value = round(value, n_decimals)
- return value
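The randomized default above picks a whole number of steps from the range and then rounds to the step's decimal places so the UI does not display long floats; a standalone sketch with illustrative values:

import random

minimum, maximum, step = 0, 1, 0.1
n_steps = int((maximum - minimum) / step)
value = minimum + random.randint(0, n_steps) * step
n_decimals = max(str(step)[::-1].find("."), 0)   # 1 decimal place for step=0.1
if n_decimals:
    value = round(value, n_decimals)
# value is one of 0.0, 0.1, ..., 1.0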
-
- @staticmethod
- def update(
- value: float | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- minimum: float | None = None,
- maximum: float | None = None,
- step: float | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "minimum": minimum,
- "maximum": maximum,
- "step": step,
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def generate_sample(self) -> float:
- return self.maximum
-
- def postprocess(self, y: float | None) -> float | None:
- """
- Any postprocessing needed to be performed on function output.
- Parameters:
- y: numeric output
- Returns:
- numeric output or minimum number if None
- """
- return self.minimum if y is None else y
-
- def set_interpret_parameters(self, steps: int = 8) -> "Slider":
- """
- Calculates interpretation scores of numeric values ranging between the minimum and maximum values of the slider.
- Parameters:
- steps: Number of neighboring values to measure between the minimum and maximum values of the slider range.
- """
- self.interpretation_steps = steps
- return self
-
- def get_interpretation_neighbors(self, x) -> Tuple[object, dict]:
- return (
- np.linspace(self.minimum, self.maximum, self.interpretation_steps).tolist(),
- {},
- )
-
- def get_interpretation_scores(
- self, x, neighbors, scores: List[float], **kwargs
- ) -> List[float]:
- """
- Returns:
- Each value represents the score corresponding to an evenly spaced range of inputs between the minimum and maximum slider values.
- """
- return scores
-
- def style(
- self,
- *,
- container: bool | None = None,
- ):
- """
- This method can be used to change the appearance of the slider.
- Parameters:
- container: If True, will place the component in a container - providing some extra padding around the border.
- """
- return Component.style(
- self,
- container=container,
- )
-
-
-@document("change", "style")
-class Checkbox(FormComponent, Changeable, IOComponent, SimpleSerializable):
- """
- Creates a checkbox that can be set to `True` or `False`.
-
- Preprocessing: passes the status of the checkbox as a {bool} into the function.
- Postprocessing: expects a {bool} returned from the function and, if it is True, checks the checkbox.
- Examples-format: a {bool} representing whether the box is checked.
- Demos: sentence_builder, titanic_survival
- """
-
- def __init__(
- self,
- value: bool | Callable = False,
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: if True, checked by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, this checkbox can be checked; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.test_input = True
- self.interpret_by_tokens = False
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: bool | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def generate_sample(self):
- return True
-
- def set_interpret_parameters(self):
- """
- Calculates interpretation score of the input by comparing the output against the output when the input is the inverse boolean value of x.
- """
- return self
-
- def get_interpretation_neighbors(self, x):
- return [not x], {}
-
- def get_interpretation_scores(self, x, neighbors, scores, **kwargs):
- """
- Returns:
- The first value represents the interpretation score if the input is False, and the second if the input is True.
- """
- if x:
- return scores[0], None
- else:
- return None, scores[0]
-
-
-@document("change", "style")
-class CheckboxGroup(FormComponent, Changeable, IOComponent, SimpleSerializable):
- """
- Creates a set of checkboxes of which a subset can be checked.
- Preprocessing: passes the list of checked checkboxes as a {List[str]} or their indices as a {List[int]} into the function, depending on `type`.
- Postprocessing: expects a {List[str]}, each element of which becomes a checked checkbox.
- Examples-format: a {List[str]} representing the values to be checked.
- Demos: sentence_builder, titanic_survival
- """
-
- def __init__(
- self,
- choices: List[str] | None = None,
- *,
- value: List[str] | str | Callable | None = None,
- type: str = "value",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- choices: list of options to select from.
- value: default selected list of options. If callable, the function will be called whenever the app loads to set the initial value of the component.
- type: Type of value to be returned by component. "value" returns the list of strings of the choices selected, "index" returns the list of indices of the choices selected.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, choices in this checkbox group will be checkable; if False, checking will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.choices = choices or []
- self.cleared_value = []
- valid_types = ["value", "index"]
- if type not in valid_types:
- raise ValueError(
- f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
- )
- self.type = type
- self.test_input = self.choices
- self.interpret_by_tokens = False
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "choices": self.choices,
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: List[str]
- | str
- | Literal[_Keywords.NO_VALUE]
- | None = _Keywords.NO_VALUE,
- choices: List[str] | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "choices": choices,
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def generate_sample(self):
- return self.choices
-
- def preprocess(self, x: List[str]) -> List[str] | List[int]:
- """
- Parameters:
- x: list of selected choices
- Returns:
- list of selected choices as strings or indices within choice list
- """
- if self.type == "value":
- return x
- elif self.type == "index":
- return [self.choices.index(choice) for choice in x]
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'value', 'index'."
- )
-
- def postprocess(self, y: List[str] | str | None) -> List[str]:
- """
- Any postprocessing needed to be performed on function output.
- Parameters:
- y: List of selected choices. If a single choice is selected, it can be passed in as a string
- Returns:
- List of selected choices
- """
- if y is None:
- return []
- if not isinstance(y, list):
- y = [y]
- return y
-
- def set_interpret_parameters(self):
- """
- Calculates interpretation score of each choice in the input by comparing the output against the outputs when each choice in the input is independently either removed or added.
- """
- return self
-
- def get_interpretation_neighbors(self, x):
- leave_one_out_sets = []
- for choice in self.choices:
- leave_one_out_set = list(x)
- if choice in leave_one_out_set:
- leave_one_out_set.remove(choice)
- else:
- leave_one_out_set.append(choice)
- leave_one_out_sets.append(leave_one_out_set)
- return leave_one_out_sets, {}
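The neighbor sets built above toggle each choice once relative to the current selection; for example (illustrative values):

choices = ["a", "b", "c"]
x = ["a", "c"]                  # currently checked boxes
neighbors = []
for choice in choices:
    variant = list(x)
    if choice in variant:
        variant.remove(choice)  # uncheck a checked choice
    else:
        variant.append(choice)  # check an unchecked choice
    neighbors.append(variant)
# neighbors -> [['c'], ['a', 'c', 'b'], ['a']]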
-
- def get_interpretation_scores(self, x, neighbors, scores, **kwargs):
- """
- Returns:
- For each tuple in the list, the first value represents the interpretation score if the choice is unchecked, and the second if it is checked.
- """
- final_scores = []
- for choice, score in zip(self.choices, scores):
- if choice in x:
- score_set = [score, None]
- else:
- score_set = [None, score]
- final_scores.append(score_set)
- return final_scores
-
- def style(
- self,
- *,
- item_container: bool | None = None,
- container: bool | None = None,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the CheckboxGroup.
- Parameters:
- item_container: If True, will place the items in a container.
- container: If True, will place the component in a container - providing some extra padding around the border.
- """
- if item_container is not None:
- self._style["item_container"] = item_container
-
- return Component.style(self, container=container, **kwargs)
-
-
-@document("change", "style")
-class Radio(FormComponent, Changeable, IOComponent, SimpleSerializable):
- """
- Creates a set of radio buttons of which only one can be selected.
- Preprocessing: passes the value of the selected radio button as a {str} or its index as an {int} into the function, depending on `type`.
- Postprocessing: expects a {str} corresponding to the value of the radio button to be selected.
- Examples-format: a {str} representing the radio option to select.
-
- Demos: sentence_builder, titanic_survival, blocks_essay
- """
-
- def __init__(
- self,
- choices: List[str] | None = None,
- *,
- value: str | Callable | None = None,
- type: str = "value",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- choices: list of options to select from.
- value: the button selected by default. If None, no button is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
- type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, choices in this radio group will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.choices = choices or []
- valid_types = ["value", "index"]
- if type not in valid_types:
- raise ValueError(
- f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
- )
- self.type = type
- self.test_input = self.choices[0] if len(self.choices) else None
- self.interpret_by_tokens = False
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
- self.cleared_value = self.value
-
- def get_config(self):
- return {
- "choices": self.choices,
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- choices: List[str] | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "choices": choices,
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def generate_sample(self):
- return self.choices[0]
-
- def preprocess(self, x: str | None) -> str | int | None:
- """
- Parameters:
- x: selected choice
- Returns:
- selected choice as string or index within choice list
- """
- if self.type == "value":
- return x
- elif self.type == "index":
- if x is None:
- return None
- else:
- return self.choices.index(x)
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'value', 'index'."
- )
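The two `type` modes handled above differ only in what is forwarded to the user function; with illustrative values:

choices = ["cat", "dog", "bird"]
selected = "dog"
as_value = selected                 # type="value": the string itself
as_index = choices.index(selected)  # type="index": 1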
-
- def set_interpret_parameters(self):
- """
- Calculates interpretation score of each choice by comparing the output against each of the outputs when alternative choices are selected.
- """
- return self
-
- def get_interpretation_neighbors(self, x):
- choices = list(self.choices)
- choices.remove(x)
- return choices, {}
-
- def get_interpretation_scores(
- self, x, neighbors, scores: List[float | None], **kwargs
- ) -> List:
- """
- Returns:
- Each value represents the interpretation score corresponding to each choice.
- """
- scores.insert(self.choices.index(x), None)
- return scores
-
- def style(
- self,
- *,
- item_container: bool | None = None,
- container: bool | None = None,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the radio component.
- Parameters:
- item_container: If True, will place items in a container.
- container: If True, will place the component in a container - providing some extra padding around the border.
- """
- if item_container is not None:
- self._style["item_container"] = item_container
-
- return Component.style(self, container=container, **kwargs)
-
-
-@document("change", "style")
-class Dropdown(Radio):
- """
- Creates a dropdown of which only one entry can be selected.
- Preprocessing: passes the value of the selected dropdown entry as a {str} or its index as an {int} into the function, depending on `type`.
- Postprocessing: expects a {str} corresponding to the value of the dropdown entry to be selected.
- Examples-format: a {str} representing the drop down value to select.
- Demos: sentence_builder, titanic_survival
- """
-
- def __init__(
- self,
- choices: List[str] | None = None,
- *,
- value: str | Callable | None = None,
- type: str = "value",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- choices: list of options to select from.
- value: default value selected in dropdown. If None, no value is selected by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
- type: Type of value to be returned by component. "value" returns the string of the choice selected, "index" returns the index of the choice selected.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, choices in this dropdown will be selectable; if False, selection will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- Radio.__init__(
- self,
- value=value,
- choices=choices,
- type=type,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- **kwargs,
- )
-
- def style(self, *, container: bool | None = None, **kwargs):
- """
- This method can be used to change the appearance of the Dropdown.
- Parameters:
- container: If True, will place the component in a container - providing some extra padding around the border.
- """
- return Component.style(self, container=container, **kwargs)
-
-
-@document("edit", "clear", "change", "stream", "change", "style")
-class Image(
- Editable,
- Clearable,
- Changeable,
- Streamable,
- Uploadable,
- IOComponent,
- ImgSerializable,
-):
- """
- Creates an image component that can be used to upload/draw images (as an input) or display images (as an output).
- Preprocessing: passes the uploaded image as a {numpy.array}, {PIL.Image} or {str} filepath depending on `type` -- unless `tool` is `sketch` AND source is one of `upload` or `webcam`. In these cases, a {dict} with keys `image` and `mask` is passed, and the format of the corresponding values depends on `type`.
- Postprocessing: expects a {numpy.array}, {PIL.Image} or {str} or {pathlib.Path} filepath to an image and displays the image.
- Examples-format: a {str} filepath to a local file that contains the image.
- Demos: image_mod, image_mod_default_image
- Guides: Gradio_and_ONNX_on_Hugging_Face, image_classification_in_pytorch, image_classification_in_tensorflow, image_classification_with_vision_transformers, building_a_pictionary_app, create_your_own_friends_with_a_gan
- """
-
- def __init__(
- self,
- value: str | _Image.Image | np.ndarray | None = None,
- *,
- shape: Tuple[int, int] | None = None,
- image_mode: str = "RGB",
- invert_colors: bool = False,
- source: str = "upload",
- tool: str | None = None,
- type: str = "numpy",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- **kwargs,
- ):
- """
- Parameters:
- value: A PIL Image, numpy array, path or URL for the default value that Image component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component.
- shape: (width, height) shape to crop and resize image to; if None, matches input image size. Pass None for either width or height to only crop and resize the other.
- image_mode: "RGB" if color, or "L" if black and white.
- invert_colors: whether to invert the image as a preprocessing step.
- source: Source of image. "upload" creates a box where user can drop an image file, "webcam" allows user to take snapshot from their webcam, "canvas" defaults to a white image that can be edited and drawn upon with tools.
- tool: Tools used for editing. "editor" allows a full screen editor (and is the default if source is "upload" or "webcam"), "select" provides a cropping and zoom tool, "sketch" allows you to create a binary sketch (and is the default if source="canvas"), and "color-sketch" allows you to create a sketch in different colors. "color-sketch" can be used with source="upload" or "webcam" to allow sketching on an image. "sketch" can also be used with "upload" or "webcam" to create a mask over an image and in that case both the image and mask are passed into the function as a dictionary with keys "image" and "mask" respectively.
- type: The format the image is converted to before being passed into the prediction function. "numpy" converts the image to a numpy array with shape (width, height, 3) and values from 0 to 255, "pil" converts the image to a PIL image object, "filepath" passes a str path to a temporary file containing the image.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, will allow users to upload and edit an image; if False, can only be used to display images. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- streaming: If True when used in a `live` interface, will automatically stream webcam feed. Only valid if source is 'webcam'.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- mirror_webcam: If True webcam will be mirrored. Default is True.
- """
- self.mirror_webcam = mirror_webcam
- valid_types = ["numpy", "pil", "filepath"]
- if type not in valid_types:
- raise ValueError(
- f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
- )
- self.type = type
- self.shape = shape
- self.image_mode = image_mode
- valid_sources = ["upload", "webcam", "canvas"]
- if source not in valid_sources:
- raise ValueError(
- f"Invalid value for parameter `source`: {source}. Please choose from one of: {valid_sources}"
- )
- self.source = source
- if tool is None:
- self.tool = "sketch" if source == "canvas" else "editor"
- else:
- self.tool = tool
- self.invert_colors = invert_colors
- self.test_input = deepcopy(media_data.BASE64_IMAGE)
- self.interpret_by_tokens = True
- self.streaming = streaming
- if streaming and source != "webcam":
- raise ValueError("Image streaming only available if source is 'webcam'.")
-
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "image_mode": self.image_mode,
- "shape": self.shape,
- "source": self.source,
- "tool": self.tool,
- "value": self.value,
- "streaming": self.streaming,
- "mirror_webcam": self.mirror_webcam,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def _format_image(
- self, im: _Image.Image | None
- ) -> np.ndarray | _Image.Image | str | None:
- """Helper method to format an image based on self.type"""
- if im is None:
- return im
- fmt = im.format
- if self.type == "pil":
- return im
- elif self.type == "numpy":
- return np.array(im)
- elif self.type == "filepath":
- file_obj = tempfile.NamedTemporaryFile(
- delete=False,
- suffix=("." + fmt.lower() if fmt is not None else ".png"),
- )
- im.save(file_obj.name)
- return file_obj.name
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'numpy', 'pil', 'filepath'."
- )
-
- def generate_sample(self):
- return deepcopy(media_data.BASE64_IMAGE)
-
- def preprocess(
- self, x: str | Dict[str, str]
- ) -> np.ndarray | _Image.Image | str | Dict | None:
- """
- Parameters:
- x: base64 url data, or (if tool == "sketch") a dict of image and mask base64 url data
- Returns:
- image in requested format, or (if tool == "sketch") a dict of image and mask in requested format
- """
- if x is None:
- return x
-
- mask = ""
- if self.tool == "sketch" and self.source in ["upload", "webcam"]:
- assert isinstance(x, dict)
- x, mask = x["image"], x["mask"]
-
- assert isinstance(x, str)
- im = processing_utils.decode_base64_to_image(x)
- with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- im = im.convert(self.image_mode)
- if self.shape is not None:
- im = processing_utils.resize_and_crop(im, self.shape)
- if self.invert_colors:
- im = PIL.ImageOps.invert(im)
- if (
- self.source == "webcam"
- and self.mirror_webcam is True
- and self.tool != "color-sketch"
- ):
- im = PIL.ImageOps.mirror(im)
-
- if self.tool == "sketch" and self.source in ["upload", "webcam"]:
- if mask is None:
- return self._format_image(im)
- mask_im = processing_utils.decode_base64_to_image(mask)
- return {
- "image": self._format_image(im),
- "mask": self._format_image(mask_im),
- }
-
- return self._format_image(im)
-
- def postprocess(
- self, y: np.ndarray | _Image.Image | str | Path | None
- ) -> str | None:
- """
- Parameters:
- y: image as a numpy array, PIL Image, string/Path filepath, or string URL
- Returns:
- base64 url data
- """
- if y is None:
- return None
- if isinstance(y, np.ndarray):
- return processing_utils.encode_array_to_base64(y)
- elif isinstance(y, _Image.Image):
- return processing_utils.encode_pil_to_base64(y)
- elif isinstance(y, (str, Path)):
- return processing_utils.encode_url_or_file_to_base64(y)
- else:
- raise ValueError("Cannot process this value as an Image")
-
- def set_interpret_parameters(self, segments: int = 16):
- """
- Calculates interpretation score of image subsections by splitting the image into subsections, then using a "leave one out" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value.
- Parameters:
- segments: Number of interpretation segments to split image into.
- """
- self.interpretation_segments = segments
- return self
-
- def _segment_by_slic(self, x):
- """
- Helper method that segments an image into superpixels using slic.
- Parameters:
- x: base64 representation of an image
- """
- x = processing_utils.decode_base64_to_image(x)
- if self.shape is not None:
- x = processing_utils.resize_and_crop(x, self.shape)
- resized_and_cropped_image = np.array(x)
- try:
- from skimage.segmentation import slic
- except (ImportError, ModuleNotFoundError):
- raise ValueError(
- "Error: running this interpretation for images requires scikit-image, please install it first."
- )
- try:
- segments_slic = slic(
- resized_and_cropped_image,
- self.interpretation_segments,
- compactness=10,
- sigma=1,
- start_label=1,
- )
- except TypeError: # For skimage 0.16 and older
- segments_slic = slic(
- resized_and_cropped_image,
- self.interpretation_segments,
- compactness=10,
- sigma=1,
- )
- return segments_slic, resized_and_cropped_image
-
- def tokenize(self, x):
- """
- Segments image into tokens, masks, and leave-one-out-tokens
- Parameters:
- x: base64 representation of an image
- Returns:
- tokens: list of tokens, used by the get_masked_input() method
- leave_one_out_tokens: list of left-out tokens, used by the get_interpretation_neighbors() method
- masks: list of masks, used by the get_interpretation_neighbors() method
- """
- segments_slic, resized_and_cropped_image = self._segment_by_slic(x)
- tokens, masks, leave_one_out_tokens = [], [], []
- replace_color = np.mean(resized_and_cropped_image, axis=(0, 1))
- for (i, segment_value) in enumerate(np.unique(segments_slic)):
- mask = segments_slic == segment_value
- image_screen = np.copy(resized_and_cropped_image)
- image_screen[segments_slic == segment_value] = replace_color
- leave_one_out_tokens.append(
- processing_utils.encode_array_to_base64(image_screen)
- )
- token = np.copy(resized_and_cropped_image)
- token[segments_slic != segment_value] = 0
- tokens.append(token)
- masks.append(mask)
- return tokens, leave_one_out_tokens, masks
-
- def get_masked_inputs(self, tokens, binary_mask_matrix):
- masked_inputs = []
- for binary_mask_vector in binary_mask_matrix:
- masked_input = np.zeros_like(tokens[0], dtype=int)
- for token, b in zip(tokens, binary_mask_vector):
- masked_input = masked_input + token * int(b)
- masked_inputs.append(processing_utils.encode_array_to_base64(masked_input))
- return masked_inputs
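Each superpixel token produced by `tokenize()` is an image that is zero outside its segment, so summing the tokens kept by a binary mask vector reconstructs a partially masked image, as in this small sketch with toy 2x2 "images":

import numpy as np

tokens = [
    np.array([[1, 0], [0, 0]]),   # segment 1
    np.array([[0, 2], [0, 0]]),   # segment 2
    np.array([[0, 0], [3, 4]]),   # segment 3
]
mask_vector = [1, 0, 1]           # keep segments 1 and 3
masked = np.zeros_like(tokens[0])
for token, keep in zip(tokens, mask_vector):
    masked = masked + token * int(keep)
# masked -> [[1, 0], [3, 4]]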
-
- def get_interpretation_scores(
- self, x, neighbors, scores, masks, tokens=None, **kwargs
- ) -> List[List[float]]:
- """
- Returns:
- A 2D array representing the interpretation score of each pixel of the image.
- """
- x = processing_utils.decode_base64_to_image(x)
- if self.shape is not None:
- x = processing_utils.resize_and_crop(x, self.shape)
- x = np.array(x)
- output_scores = np.zeros((x.shape[0], x.shape[1]))
-
- for score, mask in zip(scores, masks):
- output_scores += score * mask
-
- max_val, min_val = np.max(output_scores), np.min(output_scores)
- if max_val > 0:
- output_scores = (output_scores - min_val) / (max_val - min_val)
- return output_scores.tolist()
-
- def style(self, *, height: int | None = None, width: int | None = None, **kwargs):
- """
- This method can be used to change the appearance of the Image component.
- Parameters:
- height: Height of the image.
- width: Width of the image.
- """
- self._style["height"] = height
- self._style["width"] = width
- return Component.style(
- self,
- **kwargs,
- )
-
- def stream(
- self,
- fn: Callable,
- inputs: List[Component],
- outputs: List[Component],
- _js: str | None = None,
- api_name: str | None = None,
- preprocess: bool = True,
- postprocess: bool = True,
- ):
- """
- This event is triggered when the user streams the component (e.g. a live webcam
- component)
- Parameters:
- fn: Callable function
- inputs: List of inputs
- outputs: List of outputs
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if self.source != "webcam":
- raise ValueError("Image streaming only available if source is 'webcam'.")
- Streamable.stream(
- self,
- fn,
- inputs,
- outputs,
- _js=_js,
- api_name=api_name,
- preprocess=preprocess,
- postprocess=postprocess,
- )
-
- def as_example(self, input_data: str | None) -> str:
- return "" if input_data is None else str(Path(input_data).resolve())
-
-
-@document("change", "clear", "play", "pause", "stop", "style")
-class Video(
- Changeable,
- Clearable,
- Playable,
- Uploadable,
- IOComponent,
- FileSerializable,
- TempFileManager,
-):
- """
- Creates a video component that can be used to upload/record videos (as an input) or display videos (as an output).
- For the video to be playable in the browser it must have a compatible container and codec combination. Allowed
- combinations are .mp4 with h264 codec, .ogg with theora codec, and .webm with vp9 codec. If the component detects
- that the output video would not be playable in the browser it will attempt to convert it to a playable mp4 video.
- If the conversion fails, the original video is returned.
- Preprocessing: passes the uploaded video as a {str} filepath or URL whose extension can be modified by `format`.
- Postprocessing: expects a {str} filepath to a video which is displayed.
- Examples-format: a {str} filepath to a local file that contains the video.
- Demos: video_identity
- """
-
- def __init__(
- self,
- value: str | Callable | None = None,
- *,
- format: str | None = None,
- source: str = "upload",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- mirror_webcam: bool = True,
- include_audio: bool | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: A path or URL for the default value that Video component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component.
- format: Format of video to be returned by component, such as 'avi' or 'mp4'. Use 'mp4' to ensure browser playability. If set to None, video will keep uploaded format.
- source: Source of video. "upload" creates a box where user can drop a video file, "webcam" allows user to record a video from their webcam.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, will allow users to upload a video; if False, can only be used to display videos. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- mirror_webcam: If True webcam will be mirrored. Default is True.
- include_audio: Whether the component should record/retain the audio track for a video. By default, audio is excluded for webcam videos and included for uploaded videos.
- """
- self.format = format
- valid_sources = ["upload", "webcam"]
- if source not in valid_sources:
- raise ValueError(
- f"Invalid value for parameter `source`: {source}. Please choose from one of: {valid_sources}"
- )
- self.source = source
- self.mirror_webcam = mirror_webcam
- self.include_audio = (
- include_audio if include_audio is not None else source == "upload"
- )
- TempFileManager.__init__(self)
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "source": self.source,
- "value": self.value,
- "mirror_webcam": self.mirror_webcam,
- "include_audio": self.include_audio,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- source: str | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "source": source,
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def preprocess(self, x: Dict[str, str] | None) -> str | None:
- """
- Parameters:
- x: a dictionary with the following keys: 'name' (containing the file path to a video), 'data' (with either the file URL or base64 representation of the video), and 'is_file' (True if `data` contains the file URL).
- Returns:
- a string file path to the preprocessed video
- """
- if x is None:
- return x
-
- file_name, file_data, is_file = (
- x["name"],
- x["data"],
- x.get("is_file", False),
- )
- if is_file:
- file = self.make_temp_copy_if_needed(file_name)
- file_name = Path(file)
- else:
- file = processing_utils.decode_base64_to_file(
- file_data, file_path=file_name
- )
- file_name = Path(file.name)
-
- uploaded_format = file_name.suffix.replace(".", "")
- modify_format = self.format is not None and uploaded_format != self.format
- flip = self.source == "webcam" and self.mirror_webcam
- if modify_format or flip:
- format = f".{self.format if modify_format else uploaded_format}"
- output_options = ["-vf", "hflip", "-c:a", "copy"] if flip else []
- output_options += ["-an"] if not self.include_audio else []
- flip_suffix = "_flip" if flip else ""
- output_file_name = str(
- file_name.with_name(f"{file_name.stem}{flip_suffix}{format}")
- )
- if Path(output_file_name).exists():
- return output_file_name
- ff = FFmpeg(
- inputs={str(file_name): None},
- outputs={output_file_name: output_options},
- )
- ff.run()
- return output_file_name
- elif not self.include_audio:
- output_file_name = str(file_name.with_name(f"muted_{file_name.name}"))
- ff = FFmpeg(
- inputs={str(file_name): None},
- outputs={output_file_name: ["-an"]},
- )
- ff.run()
- return output_file_name
- else:
- return str(file_name)
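For a mirrored webcam recording with audio stripped, the branch above ends up invoking an ffmpeg command along these lines (a sketch that assumes the `FFmpeg` wrapper used here is ffmpy, which matches the `inputs=`/`outputs=` call signature; file names are illustrative):

from ffmpy import FFmpeg

ff = FFmpeg(
    inputs={"recording.webm": None},
    outputs={"recording_flip.mp4": ["-vf", "hflip", "-c:a", "copy", "-an"]},
)
print(ff.cmd)
# roughly: ffmpeg -i recording.webm -vf hflip -c:a copy -an recording_flip.mp4
# ff.run() would execute it, as the method above does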
-
- def generate_sample(self):
- """Generates a random video for testing the API."""
- return deepcopy(media_data.BASE64_VIDEO)
-
- def postprocess(self, y: str | None) -> Dict[str, Any] | None:
- """
- Processes a video to ensure that it is in the correct format before
- returning it to the front end.
- Parameters:
- y: a path or URL to the video file
- Returns:
- a dictionary with the following keys: 'name' (containing the file path
- to a temporary copy of the video), 'data' (None), and 'is_file' (True).
- """
- if y is None:
- return None
-
- returned_format = y.split(".")[-1].lower()
-
- if self.format is None or returned_format == self.format:
- conversion_needed = False
- else:
- conversion_needed = True
-
- # For cases where the video is a URL and does not need to be converted to another format, we can just return the URL
- if utils.validate_url(y) and not (conversion_needed):
- return {"name": y, "data": None, "is_file": True}
-
- # For cases where the video needs to be converted to another format
- if utils.validate_url(y):
- y = self.download_temp_copy_if_needed(y)
- if (
- processing_utils.ffmpeg_installed()
- and not processing_utils.video_is_playable(y)
- ):
- warnings.warn(
- "Video does not have browser-compatible container or codec. Converting to mp4"
- )
- y = processing_utils.convert_video_to_playable_mp4(y)
- if self.format is not None and returned_format != self.format:
- output_file_name = y[0 : y.rindex(".") + 1] + self.format
- ff = FFmpeg(inputs={y: None}, outputs={output_file_name: None})
- ff.run()
- y = output_file_name
-
- y = self.make_temp_copy_if_needed(y)
- return {"name": y, "data": None, "is_file": True}
-
- def style(self, *, height: int | None = None, width: int | None = None, **kwargs):
- """
- This method can be used to change the appearance of the video component.
- Parameters:
- height: Height of the video.
- width: Width of the video.
- """
- self._style["height"] = height
- self._style["width"] = width
- return Component.style(
- self,
- **kwargs,
- )
-
-
-@document("change", "clear", "play", "pause", "stop", "stream", "style")
-class Audio(
- Changeable,
- Clearable,
- Playable,
- Streamable,
- Uploadable,
- IOComponent,
- FileSerializable,
- TempFileManager,
-):
- """
- Creates an audio component that can be used to upload/record audio (as an input) or display audio (as an output).
- Preprocessing: passes the uploaded audio as a {Tuple(int, numpy.array)} corresponding to (sample rate, data) or as a {str} filepath, depending on `type`
- Postprocessing: expects a {Tuple(int, numpy.array)} corresponding to (sample rate, data) or as a {str} filepath or URL to an audio file, which gets displayed
- Examples-format: a {str} filepath to a local file that contains audio.
- Demos: main_note, generate_tone, reverse_audio
- Guides: real_time_speech_recognition
- """
-
- def __init__(
- self,
- value: str | Tuple[int, np.ndarray] | Callable | None = None,
- *,
- source: str = "upload",
- type: str = "numpy",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- streaming: bool = False,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: A path, URL, or [sample_rate, numpy array] tuple for the default value that Audio component is going to take. If callable, the function will be called whenever the app loads to set the initial value of the component.
- source: Source of audio. "upload" creates a box where user can drop an audio file, "microphone" creates a microphone input.
- type: The format the audio file is converted to before being passed into the prediction function. "numpy" converts the audio to a tuple consisting of: (int sample rate, numpy.array for the data), "filepath" passes a str path to a temporary file containing the audio.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, will allow users to upload and edit an audio file; if False, can only be used to play audio. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- streaming: If set to True when used in a `live` interface, will automatically stream audio from the connected microphone. Only valid if source is 'microphone'.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- valid_sources = ["upload", "microphone"]
- if source not in valid_sources:
- raise ValueError(
- f"Invalid value for parameter `source`: {source}. Please choose from one of: {valid_sources}"
- )
- self.source = source
- valid_types = ["numpy", "filepath"]
- if type not in valid_types:
- raise ValueError(
- f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
- )
- self.type = type
- self.test_input = deepcopy(media_data.BASE64_AUDIO)
- self.interpret_by_tokens = True
- self.streaming = streaming
- if streaming and source != "microphone":
- raise ValueError(
- "Audio streaming only available if source is 'microphone'."
- )
- TempFileManager.__init__(self)
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "source": self.source,
- "value": self.value,
- "streaming": self.streaming,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- source: str | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "source": source,
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def preprocess(
- self, x: Dict[str, Any] | None
- ) -> Tuple[int, np.ndarray] | str | None:
- """
- Parameters:
- x: dictionary with keys "name", "data", "is_file", "crop_min", "crop_max".
- Returns:
- audio in requested format
- """
- if x is None:
- return x
- file_name, file_data, is_file = (
- x["name"],
- x["data"],
- x.get("is_file", False),
- )
- crop_min, crop_max = x.get("crop_min", 0), x.get("crop_max", 100)
- if is_file:
- if utils.validate_url(file_name):
- temp_file_path = self.download_temp_copy_if_needed(file_name)
- else:
- temp_file_path = self.make_temp_copy_if_needed(file_name)
- else:
- temp_file_obj = processing_utils.decode_base64_to_file(
- file_data, file_path=file_name
- )
- temp_file_path = temp_file_obj.name
-
- sample_rate, data = processing_utils.audio_from_file(
- temp_file_path, crop_min=crop_min, crop_max=crop_max
- )
-
- if self.type == "numpy":
- return sample_rate, data
- elif self.type == "filepath":
- processing_utils.audio_to_file(sample_rate, data, temp_file_path)
- return temp_file_path
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'numpy', 'filepath'."
- )
-
- def set_interpret_parameters(self, segments: int = 8):
- """
- Calculates interpretation score of audio subsections by splitting the audio into subsections, then using a "leave one out" method to calculate the score of each subsection by removing the subsection and measuring the delta of the output value.
- Parameters:
- segments: Number of interpretation segments to split audio into.
- """
- self.interpretation_segments = segments
- return self
-
- def tokenize(self, x):
- if x.get("is_file"):
- sample_rate, data = processing_utils.audio_from_file(x["name"])
- else:
- file_obj = processing_utils.decode_base64_to_file(x["data"])
- sample_rate, data = processing_utils.audio_from_file(file_obj.name)
- leave_one_out_sets = []
- tokens = []
- masks = []
- duration = data.shape[0]
- boundaries = np.linspace(0, duration, self.interpretation_segments + 1).tolist()
- boundaries = [round(boundary) for boundary in boundaries]
- for index in range(len(boundaries) - 1):
- start, stop = boundaries[index], boundaries[index + 1]
- masks.append((start, stop))
-
- # Handle the leave one outs
- leave_one_out_data = np.copy(data)
- leave_one_out_data[start:stop] = 0
- file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
- processing_utils.audio_to_file(sample_rate, leave_one_out_data, file.name)
- out_data = processing_utils.encode_file_to_base64(file.name)
- leave_one_out_sets.append(out_data)
- file.close()
- Path(file.name).unlink()
-
- # Handle the tokens
- token = np.copy(data)
- token[0:start] = 0
- token[stop:] = 0
- file = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
- processing_utils.audio_to_file(sample_rate, token, file.name)
- token_data = processing_utils.encode_file_to_base64(file.name)
- file.close()
- Path(file.name).unlink()
-
- tokens.append(token_data)
- tokens = [{"name": "token.wav", "data": token} for token in tokens]
- leave_one_out_sets = [
- {"name": "loo.wav", "data": loo_set} for loo_set in leave_one_out_sets
- ]
- return tokens, leave_one_out_sets, masks
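The segment boundaries computed above are simply an even split of the sample axis; for a one-second, 16 kHz clip and the default of 8 segments:

import numpy as np

duration, segments = 16000, 8          # samples, interpretation_segments
boundaries = [round(float(b)) for b in np.linspace(0, duration, segments + 1)]
# boundaries -> [0, 2000, 4000, 6000, 8000, 10000, 12000, 14000, 16000]
# each consecutive (start, stop) pair is zeroed in turn for the leave-one-out clips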
-
- def get_masked_inputs(self, tokens, binary_mask_matrix):
- # create a "zero input" vector and get sample rate
- x = tokens[0]["data"]
- file_obj = processing_utils.decode_base64_to_file(x)
- sample_rate, data = processing_utils.audio_from_file(file_obj.name)
- zero_input = np.zeros_like(data, dtype="int16")
- # decode all of the tokens
- token_data = []
- for token in tokens:
- file_obj = processing_utils.decode_base64_to_file(token["data"])
- _, data = processing_utils.audio_from_file(file_obj.name)
- token_data.append(data)
- # construct the masked version
- masked_inputs = []
- for binary_mask_vector in binary_mask_matrix:
- masked_input = np.copy(zero_input)
- for t, b in zip(token_data, binary_mask_vector):
- masked_input = masked_input + t * int(b)
- file = tempfile.NamedTemporaryFile(delete=False)
- processing_utils.audio_to_file(sample_rate, masked_input, file.name)
- masked_data = processing_utils.encode_file_to_base64(file.name)
- file.close()
- Path(file.name).unlink()
- masked_inputs.append(masked_data)
- return masked_inputs
-
- def get_interpretation_scores(
- self, x, neighbors, scores, masks=None, tokens=None
- ) -> List[float]:
- """
- Returns:
- Each value represents the interpretation score corresponding to an evenly spaced subsection of audio.
- """
- return list(scores)
-
- def generate_sample(self):
- return deepcopy(media_data.BASE64_AUDIO)
-
- def postprocess(self, y: Tuple[int, np.ndarray] | str | None) -> str | Dict | None:
- """
- Parameters:
- y: audio data in either of the following formats: a tuple of (sample_rate, data), or a string filepath or URL to an audio file, or None.
- Returns:
- base64 url data
- """
- if y is None:
- return None
- if isinstance(y, str) and utils.validate_url(y):
- return {"name": y, "data": None, "is_file": True}
- if isinstance(y, tuple):
- sample_rate, data = y
- file = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
- processing_utils.audio_to_file(sample_rate, data, file.name)
- file_path = file.name
- self.temp_files.add(file_path)
- else:
- file_path = self.make_temp_copy_if_needed(y)
- return {"name": file_path, "data": None, "is_file": True}
-
- def stream(
- self,
- fn: Callable,
- inputs: List[Component],
- outputs: List[Component],
- _js: str | None = None,
- api_name: str | None = None,
- preprocess: bool = True,
- postprocess: bool = True,
- ):
- """
- This event is triggered when the user streams the component (e.g. a live webcam
- component)
- Parameters:
- fn: Callable function
- inputs: List of inputs
- outputs: List of outputs
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if self.source != "microphone":
- raise ValueError(
- "Audio streaming only available if source is 'microphone'."
- )
- Streamable.stream(
- self,
- fn,
- inputs,
- outputs,
- _js=_js,
- api_name=api_name,
- preprocess=preprocess,
- postprocess=postprocess,
- )
-
- def style(
- self,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the audio component.
- """
- return Component.style(
- self,
- **kwargs,
- )
-
- def as_example(self, input_data: str | None) -> str:
- return Path(input_data).name if input_data else ""
-
-
-@document("change", "clear", "style")
-class File(
- Changeable, Clearable, Uploadable, IOComponent, FileSerializable, TempFileManager
-):
- """
- Creates a file component that allows uploading generic files (when used as an input) and/or displaying generic files (output).
- Preprocessing: passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List[bytes]} depending on `type`)
- Postprocessing: expects function to return a {str} path to a file, or {List[str]} consisting of paths to files.
- Examples-format: a {str} path to a local file that populates the component.
- Demos: zip_to_json, zip_files
- """
-
- def __init__(
- self,
- value: str | List[str] | Callable | None = None,
- *,
- file_count: str = "single",
- file_types: List[str] | None = None,
- type: str = "file",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default file to display, given as str file path. If callable, the function will be called whenever the app loads to set the initial value of the component.
- file_count: if "single", allows user to upload one file. If "multiple", user uploads multiple files. If "directory", user uploads all files in selected directory. Return type will be list for each file in case of "multiple" or "directory".
- file_types: List of type of files to be uploaded. "file" allows any file to be uploaded, "image" allows only image files to be uploaded, "audio" allows only audio files to be uploaded, "video" allows only video files to be uploaded, "text" allows only text files to be uploaded.
- type: Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name and original filename can be retrieved with file_obj.orig_name, "binary" returns a bytes object.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, will allow users to upload a file; if False, can only be used to display files. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.file_count = file_count
- self.file_types = file_types
- valid_types = [
- "file",
- "binary",
- "bytes",
- ] # "bytes" is included for backwards compatibility
- if type not in valid_types:
- raise ValueError(
- f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
- )
- if type == "bytes":
- warnings.warn(
- "The `bytes` type is deprecated and may not work as expected. Please use `binary` instead."
- )
- self.type = type
- self.test_input = None
- TempFileManager.__init__(self)
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "file_count": self.file_count,
- "file_types": self.file_types,
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def preprocess(
- self, x: List[Dict[str, Any]] | None
- ) -> bytes | tempfile._TemporaryFileWrapper | List[
- bytes | tempfile._TemporaryFileWrapper
- ] | None:
- """
- Parameters:
- x: List of JSON objects with filename as 'name' property and base64 data as 'data' property
- Returns:
- File objects in requested format
- """
- if x is None:
- return None
-
- def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper:
- file_name, data, is_file = (
- f["name"],
- f["data"],
- f.get("is_file", False),
- )
- if self.type == "file":
- if is_file:
- temp_file_path = self.make_temp_copy_if_needed(file_name)
- file = tempfile.NamedTemporaryFile(delete=False)
- file.name = temp_file_path
- file.orig_name = file_name # type: ignore
- else:
- file = processing_utils.decode_base64_to_file(
- data, file_path=file_name
- )
- file.orig_name = file_name # type: ignore
- return file
- elif (
- self.type == "binary" or self.type == "bytes"
- ): # "bytes" is included for backwards compatibility
- if is_file:
- with open(file_name, "rb") as file_data:
- return file_data.read()
- return processing_utils.decode_base64_to_binary(data)[0]
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'file', 'bytes'."
- )
-
- if self.file_count == "single":
- if isinstance(x, list):
- return process_single_file(x[0])
- else:
- return process_single_file(x)
- else:
- if isinstance(x, list):
- return [process_single_file(f) for f in x]
- else:
- return process_single_file(x)
-
- def generate_sample(self):
- return deepcopy(media_data.BASE64_FILE)
-
- def postprocess(
- self, y: str | List[str] | None
- ) -> Dict[str, Any] | List[Dict[str, Any]] | None:
- """
- Parameters:
- y: file path
- Returns:
- JSON object with key 'name' for filename, 'data' for base64 url, and 'size' for filesize in bytes
- """
- if y is None:
- return None
- if isinstance(y, list):
- return [
- {
- "orig_name": Path(file).name,
- "name": self.make_temp_copy_if_needed(file),
- "size": Path(file).stat().st_size,
- "data": None,
- "is_file": True,
- }
- for file in y
- ]
- else:
- return {
- "orig_name": Path(y).name,
- "name": self.make_temp_copy_if_needed(y),
- "size": Path(y).stat().st_size,
- "data": None,
- "is_file": True,
- }
-
- def serialize(
- self, x: str | None, load_dir: str = "", encryption_key: bytes | None = None
- ) -> Dict | None:
- serialized = FileSerializable.serialize(self, x, load_dir, encryption_key)
- if serialized is None:
- return None
- serialized["size"] = Path(serialized["name"]).stat().st_size
- return serialized
-
- def style(
- self,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the file component.
- """
- return Component.style(
- self,
- **kwargs,
- )
-
- def as_example(self, input_data: str | List | None) -> str:
- if input_data is None:
- return ""
- elif isinstance(input_data, list):
- return ", ".join([Path(file).name for file in input_data])
- else:
- return Path(input_data).name
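-
-
-# Illustrative sketch (not part of the original module): the payload shape that
-# File.preprocess above receives from the frontend. The filename and contents
-# here are hypothetical.
-def _example_file_payload():
-    import base64
-
-    content = b"col1,col2\n1,2\n"
-    return {
-        "name": "report.csv",
-        "data": "data:text/csv;base64," + base64.b64encode(content).decode(),
-        "is_file": False,
-    }
-
-
-# With file_count="multiple" or "directory", the same structure arrives as a
-# list of such dicts, and preprocess returns a list of file objects (or bytes
-# when type="binary").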
-
-
-@document("change", "style")
-class Dataframe(Changeable, IOComponent, JSONSerializable):
- """
- Accepts or displays 2D input through a spreadsheet-like component for dataframes.
- Preprocessing: passes the uploaded spreadsheet data as a {pandas.DataFrame}, {numpy.array}, {List[List]}, or {List} depending on `type`
- Postprocessing: expects a {pandas.DataFrame}, {numpy.array}, {List[List]}, {List}, a {Dict} with keys `data` (and optionally `headers`), or {str} path to a csv, which is rendered in the spreadsheet.
- Examples-format: a {str} filepath to a csv with data, a pandas dataframe, or a list of lists (excluding headers) where each sublist is a row of data.
- Demos: filter_records, matrix_transpose, tax_calculator
- """
-
- markdown_parser = None
-
- def __init__(
- self,
- value: List[List[Any]] | Callable | None = None,
- *,
- headers: List[str] | None = None,
- row_count: int | Tuple[int, str] = (1, "dynamic"),
- col_count: int | Tuple[int, str] | None = None,
- datatype: str | List[str] = "str",
- type: str = "pandas",
- max_rows: int | None = 20,
- max_cols: int | None = None,
- overflow_row_behaviour: str = "paginate",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- wrap: bool = False,
- **kwargs,
- ):
- """
- Parameters:
- value: Default value as a 2-dimensional list of values. If callable, the function will be called whenever the app loads to set the initial value of the component.
- headers: List of str header names. If None, no headers are shown.
- row_count: Limit number of rows for input and decide whether user can create new rows. The first element of the tuple is an `int`, the row count; the second should be 'fixed' or 'dynamic', the new row behaviour. If an `int` is passed the rows default to 'dynamic'
- col_count: Limit number of columns for input and decide whether user can create new columns. The first element of the tuple is an `int`, the number of columns; the second should be 'fixed' or 'dynamic', the new column behaviour. If an `int` is passed the columns default to 'dynamic'
- datatype: Datatype of values in sheet. Can be provided per column as a list of strings, or for the entire sheet as a single string. Valid datatypes are "str", "number", "bool", "date", and "markdown".
- type: Type of value to be returned by component. "pandas" for pandas dataframe, "numpy" for numpy array, or "array" for a Python list of lists.
- max_rows: Maximum number of rows to display at once. Set to None for infinite.
- max_cols: Maximum number of columns to display at once. Set to None for infinite.
- overflow_row_behaviour: If set to "paginate", will create pages for overflow rows. If set to "show_ends", will show initial and final rows and truncate middle rows.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, will allow users to edit the dataframe; if False, can only be used to display data. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- wrap: if True, text in table cells will wrap when appropriate; if False, the table will scroll horizontally. Defaults to False.
- """
-
- self.wrap = wrap
- self.row_count = self.__process_counts(row_count)
- self.col_count = self.__process_counts(
- col_count, len(headers) if headers else 3
- )
-
- self.__validate_headers(headers, self.col_count[0])
-
- self.headers = (
- headers if headers is not None else list(range(1, self.col_count[0] + 1))
- )
- self.datatype = (
- datatype if isinstance(datatype, list) else [datatype] * self.col_count[0]
- )
- valid_types = ["pandas", "numpy", "array"]
- if type not in valid_types:
- raise ValueError(
- f"Invalid value for parameter `type`: {type}. Please choose from one of: {valid_types}"
- )
- self.type = type
- values = {
- "str": "",
- "number": 0,
- "bool": False,
- "date": "01/01/1970",
- "markdown": "",
- "html": "",
- }
- column_dtypes = (
- [datatype] * self.col_count[0] if isinstance(datatype, str) else datatype
- )
- self.test_input = [
- [values[c] for c in column_dtypes] for _ in range(self.row_count[0])
- ]
-
- self.max_rows = max_rows
- self.max_cols = max_cols
- self.overflow_row_behaviour = overflow_row_behaviour
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "headers": self.headers,
- "datatype": self.datatype,
- "row_count": self.row_count,
- "col_count": self.col_count,
- "value": self.value,
- "max_rows": self.max_rows,
- "max_cols": self.max_cols,
- "overflow_row_behaviour": self.overflow_row_behaviour,
- "wrap": self.wrap,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- max_rows: int | None = None,
- max_cols: int | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "max_rows": max_rows,
- "max_cols": max_cols,
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def preprocess(self, x: DataframeData):
- """
- Parameters:
- x: 2D array of str, numeric, or bool data
- Returns:
- Dataframe in requested format
- """
- if self.type == "pandas":
- if x.get("headers") is not None:
- return pd.DataFrame(x["data"], columns=x.get("headers"))
- else:
- return pd.DataFrame(x["data"])
- if self.type == "numpy":
- return np.array(x["data"])
- elif self.type == "array":
- return x["data"]
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'pandas', 'numpy', 'array'."
- )
-
- def generate_sample(self):
- return [[1, 2, 3], [4, 5, 6]]
-
- def postprocess(
- self, y: str | pd.DataFrame | np.ndarray | List[List[str | float]] | Dict
- ) -> Dict:
- """
- Parameters:
- y: dataframe in given format
- Returns:
- JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data
- """
- if y is None:
- return self.postprocess(self.test_input)
- if isinstance(y, dict):
- return y
- if isinstance(y, str):
- dataframe = pd.read_csv(y)
- return {
- "headers": list(dataframe.columns),
- "data": Dataframe.__process_markdown(
- dataframe.to_dict(orient="split")["data"], self.datatype
- ),
- }
- if isinstance(y, pd.DataFrame):
- return {
- "headers": list(y.columns), # type: ignore
- "data": Dataframe.__process_markdown(
- y.to_dict(orient="split")["data"], self.datatype # type: ignore
- ),
- }
- if isinstance(y, (np.ndarray, list)):
- if isinstance(y, np.ndarray):
- y = y.tolist()
- assert isinstance(y, list), "output cannot be converted to list"
-
- _headers = self.headers
-
- if len(self.headers) < len(y[0]):
- _headers = [
- *self.headers,
- *list(range(len(self.headers) + 1, len(y[0]) + 1)),
- ]
- elif len(self.headers) > len(y[0]):
- _headers = self.headers[: len(y[0])]
-
- return {
- "headers": _headers,
- "data": Dataframe.__process_markdown(y, self.datatype),
- }
- raise ValueError("Cannot process value as a Dataframe")
-
- @staticmethod
- def __process_counts(count, default=3) -> Tuple[int, str]:
- if count is None:
- return (default, "dynamic")
- if type(count) == int or type(count) == float:
- return (int(count), "dynamic")
- else:
- return count
-
- @staticmethod
- def __validate_headers(headers: List[str] | None, col_count: int):
- if headers is not None and len(headers) != col_count:
- raise ValueError(
- "The length of the headers list must be equal to the col_count int.\nThe column count is set to {cols} but `headers` has {headers} items. Check the values passed to `col_count` and `headers`.".format(
- cols=col_count, headers=len(headers)
- )
- )
-
- @classmethod
- def __process_markdown(cls, data: List[List[Any]], datatype: List[str]):
- if "markdown" not in datatype:
- return data
-
- if cls.markdown_parser is None:
- cls.markdown_parser = (
- MarkdownIt()
- .use(dollarmath_plugin, renderer=utils.tex2svg, allow_digits=False)
- .enable("table")
- )
-
- for i in range(len(data)):
- for j in range(len(data[i])):
- if datatype[j] == "markdown":
- data[i][j] = cls.markdown_parser.render(data[i][j])
-
- return data
-
- def style(
- self,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the DataFrame component.
- """
- return Component.style(
- self,
- **kwargs,
- )
-
- def as_example(self, input_data: pd.DataFrame | np.ndarray | str | None):
- if input_data is None:
- return ""
- elif isinstance(input_data, pd.DataFrame):
- return input_data.head(n=5).to_dict(orient="split")["data"] # type: ignore
- elif isinstance(input_data, np.ndarray):
- return input_data.tolist()
- return input_data
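-
-
-# Illustrative sketch (not part of the original module): how Dataframe.postprocess
-# above reconciles `self.headers` with the width of the returned rows, padding
-# with 1-based column indices or truncating as needed. The helper name is hypothetical.
-def _reconcile_headers_demo(headers, row):
-    if len(headers) < len(row):
-        return [*headers, *range(len(headers) + 1, len(row) + 1)]
-    return headers[: len(row)]
-
-
-# e.g. _reconcile_headers_demo(["a", "b"], [1, 2, 3]) -> ["a", "b", 3]
-#      _reconcile_headers_demo(["a", "b", "c"], [1, 2]) -> ["a", "b"]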
-
-
-@document("change", "style")
-class Timeseries(Changeable, IOComponent, JSONSerializable):
- """
- Creates a component that can be used to upload/preview timeseries csv files or display a dataframe consisting of a time series graphically.
- Preprocessing: passes the uploaded timeseries data as a {pandas.DataFrame} into the function
- Postprocessing: expects a {pandas.DataFrame} or {str} path to a csv to be returned, which is then displayed as a timeseries graph
- Examples-format: a {str} filepath of csv data with time series data.
- Demos: fraud_detector
- """
-
- def __init__(
- self,
- value: str | Callable | None = None,
- *,
- x: str | None = None,
- y: str | List[str] | None = None,
- colors: List[str] | None = None,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: File path for the timeseries csv file. If callable, the function will be called whenever the app loads to set the initial value of the component.
- x: Column name of x (time) series. None if csv has no headers, in which case first column is x series.
- y: Column name of y series, or list of column names if multiple series. None if csv has no headers, in which case every column after first is a y series.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- colors: an ordered list of colors to use for each line plot
- show_label: if True, will display label.
- interactive: if True, will allow users to upload a timeseries csv; if False, can only be used to display timeseries data. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.x = x
- if isinstance(y, str):
- y = [y]
- self.y = y
- self.colors = colors
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "x": self.x,
- "y": self.y,
- "value": self.value,
- "colors": self.colors,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- colors: List[str] | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "colors": colors,
- "label": label,
- "show_label": show_label,
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def preprocess(self, x: Dict | None) -> pd.DataFrame | None:
- """
- Parameters:
- x: Dict with keys 'data': 2D array of str, numeric, or bool data, 'headers': list of strings for header names, 'range': optional two element list designating start of end of subrange.
- Returns:
- Dataframe of timeseries data
- """
- if x is None:
- return x
- elif x.get("is_file"):
- dataframe = pd.read_csv(x["name"])
- else:
- dataframe = pd.DataFrame(data=x["data"], columns=x["headers"])
- if x.get("range") is not None:
- dataframe = dataframe.loc[dataframe[self.x or 0] >= x["range"][0]]
- dataframe = dataframe.loc[dataframe[self.x or 0] <= x["range"][1]]
- return dataframe
-
- def generate_sample(self):
- return {
- "data": [[1] + [2] * len(self.y or [])] * 4,
- "headers": [self.x] + (self.y or []),
- }
-
- def postprocess(self, y: str | pd.DataFrame | None) -> Dict | None:
- """
- Parameters:
- y: csv or dataframe with timeseries data
- Returns:
- JSON object with key 'headers' for list of header names, 'data' for 2D array of string or numeric data
- """
- if y is None:
- return None
- if isinstance(y, str):
- dataframe = pd.read_csv(y)
- return {
- "headers": dataframe.columns.values.tolist(),
- "data": dataframe.values.tolist(),
- }
- if isinstance(y, pd.DataFrame):
- return {"headers": y.columns.values.tolist(), "data": y.values.tolist()}
- raise ValueError("Cannot process value as Timeseries data")
-
- def style(
- self,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the TimeSeries component.
- """
- return Component.style(
- self,
- **kwargs,
- )
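-
-
-# Illustrative sketch (not part of the original module): the optional 'range'
-# filter that Timeseries.preprocess above applies on the x column. The column
-# names and values are hypothetical.
-def _timeseries_range_demo():
-    import pandas as pd
-
-    dataframe = pd.DataFrame({"time": [1, 2, 3, 4], "value": [10, 20, 30, 40]})
-    start, end = 2, 3  # would come from x["range"]
-    dataframe = dataframe.loc[dataframe["time"] >= start]
-    dataframe = dataframe.loc[dataframe["time"] <= end]
-    return dataframe  # only the rows with 2 <= time <= 3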
-
-
-@document()
-class State(IOComponent, SimpleSerializable):
- """
- Special hidden component that stores session state across runs of the demo by the
- same user. The value of the State variable is cleared when the user refreshes the page.
-
- Preprocessing: No preprocessing is performed
- Postprocessing: No postprocessing is performed
- Demos: chatbot_demo, blocks_simple_squares
- Guides: creating_a_chatbot, real_time_speech_recognition
- """
-
- allow_string_shortcut = False
-
- def __init__(
- self,
- value: Any = None,
- **kwargs,
- ):
- """
- Parameters:
- value: the initial value of the state. If callable, the function will be called whenever the app loads to set the initial value of the component.
- """
- self.stateful = True
- IOComponent.__init__(self, value=deepcopy(value), **kwargs)
-
- def style(self):
- return self
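-
-
-# Illustrative sketch (not part of the original module): a minimal per-session
-# counter using the State component described above; assumes the public
-# gr.Blocks / gr.Button / gr.Textbox API.
-def _state_counter_demo():
-    import gradio as gr
-
-    def increment(count):
-        return count + 1, f"Clicked {count + 1} times"
-
-    with gr.Blocks() as demo:
-        count = gr.State(value=0)  # per-session value, reset on page refresh
-        label = gr.Textbox(label="Clicks")
-        button = gr.Button("Increment")
-        button.click(increment, inputs=[count], outputs=[count, label])
-    return demo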
-
-
-class Variable(State):
- """Variable was renamed to State. This class is kept for backwards compatibility."""
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
-
- def get_block_name(self):
- return "state"
-
-
-@document("click", "style")
-class Button(Clickable, IOComponent, SimpleSerializable):
- """
- Used to create a button that can be assigned arbitrary click() events. The label (value) of the button can be used as an input or set via the output of a function.
-
- Preprocessing: passes the button value as a {str} into the function
- Postprocessing: expects a {str} to be returned from a function, which is set as the label of the button
- Demos: blocks_inputs, blocks_kinematics
- """
-
- def __init__(
- self,
- value: str | Callable = "Run",
- *,
- variant: str = "secondary",
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default text for the button to display. If callable, the function will be called whenever the app loads to set the initial value of the component.
- variant: 'primary' for main call-to-action, 'secondary' for a more subdued style
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- IOComponent.__init__(
- self, visible=visible, elem_id=elem_id, value=value, **kwargs
- )
- self.variant = variant
-
- def get_config(self):
- return {
- "value": self.value,
- "variant": self.variant,
- **Component.get_config(self),
- }
-
- @staticmethod
- def update(
- value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- variant: str | None = None,
- visible: bool | None = None,
- ):
- return {
- "variant": variant,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
-
- def style(self, *, full_width: bool | None = None, **kwargs):
- """
- This method can be used to change the appearance of the button component.
- Parameters:
- full_width: If True, will expand to fill parent container.
- """
- if full_width is not None:
- self._style["full_width"] = full_width
-
- return Component.style(self, **kwargs)
-
-
-@document("click", "upload", "style")
-class UploadButton(
- Clickable, Uploadable, IOComponent, FileSerializable, TempFileManager
-):
- """
- Used to create an upload button which, when clicked, allows a user to upload files that satisfy the specified file types, or generic files (if `file_types` is not set).
- Preprocessing: passes the uploaded file as a {file-object} or {List[file-object]} depending on `file_count` (or a {bytes}/{List[bytes]} depending on `type`)
- Postprocessing: expects function to return a {str} path to a file, or {List[str]} consisting of paths to files.
- Examples-format: a {str} path to a local file that populates the component.
- Demos: upload_button
- """
-
- def __init__(
- self,
- label: str = "Upload a File",
- value: str | List[str] | Callable | None = None,
- *,
- visible: bool = True,
- elem_id: str | None = None,
- type: str = "file",
- file_count: str = "single",
- file_types: List[str] | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default file (or list of files) to display, given as a str file path or list of str file paths.
- type: Type of value to be returned by component. "file" returns a temporary file object whose path can be retrieved by file_obj.name and original filename can be retrieved with file_obj.orig_name, "binary" returns a bytes object.
- file_count: if "single", allows user to upload one file. If "multiple", user uploads multiple files. If "directory", user uploads all files in selected directory. Return type will be list for each file in case of "multiple" or "directory".
- file_types: List of type of files to be uploaded. "file" allows any file to be uploaded, "image" allows only image files to be uploaded, "audio" allows only audio files to be uploaded, "video" allows only video files to be uploaded, "text" allows only text files to be uploaded.
- label: Text to display on the button. Defaults to "Upload a File".
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.type = type
- self.file_count = file_count
- self.file_types = file_types
- self.label = label
- TempFileManager.__init__(self)
- IOComponent.__init__(
- self, label=label, visible=visible, elem_id=elem_id, value=value, **kwargs
- )
-
- def get_config(self):
- return {
- "label": self.label,
- "value": self.value,
- "file_count": self.file_count,
- "file_types": self.file_types,
- **Component.get_config(self),
- }
-
- @staticmethod
- def update(
- value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- interactive: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "interactive": interactive,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def preprocess(
- self, x: List[Dict[str, Any]] | None
- ) -> bytes | tempfile._TemporaryFileWrapper | List[
- bytes | tempfile._TemporaryFileWrapper
- ] | None:
- """
- Parameters:
- x: List of JSON objects with filename as 'name' property and base64 data as 'data' property
- Returns:
- File objects in requested format
- """
- if x is None:
- return None
-
- def process_single_file(f) -> bytes | tempfile._TemporaryFileWrapper:
- file_name, data, is_file = (
- f["name"],
- f["data"],
- f.get("is_file", False),
- )
- if self.type == "file":
- if is_file:
- temp_file_path = self.make_temp_copy_if_needed(file_name)
- file = tempfile.NamedTemporaryFile(delete=False)
- file.name = temp_file_path
- file.orig_name = file_name # type: ignore
- else:
- file = processing_utils.decode_base64_to_file(
- data, file_path=file_name
- )
- file.orig_name = file_name # type: ignore
- return file
- elif self.type == "bytes":
- if is_file:
- with open(file_name, "rb") as file_data:
- return file_data.read()
- return processing_utils.decode_base64_to_binary(data)[0]
- else:
- raise ValueError(
- "Unknown type: "
- + str(self.type)
- + ". Please choose from: 'file', 'bytes'."
- )
-
- if self.file_count == "single":
- if isinstance(x, list):
- return process_single_file(x[0])
- else:
- return process_single_file(x)
- else:
- if isinstance(x, list):
- return [process_single_file(f) for f in x]
- else:
- return process_single_file(x)
-
- def generate_sample(self):
- return deepcopy(media_data.BASE64_FILE)
-
- def serialize(
- self, x: str | None, load_dir: str = "", encryption_key: bytes | None = None
- ) -> Dict | None:
- serialized = FileSerializable.serialize(self, x, load_dir, encryption_key)
- if serialized is None:
- return None
- serialized["size"] = Path(serialized["name"]).stat().st_size
- return serialized
-
- def style(self, *, full_width: bool | None = None, **kwargs):
- """
- This method can be used to change the appearance of the button component.
- Parameters:
- full_width: If True, will expand to fill parent container.
- """
- if full_width is not None:
- self._style["full_width"] = full_width
-
- return Component.style(self, **kwargs)
-
-
-@document("change", "submit", "style")
-class ColorPicker(Changeable, Submittable, IOComponent, SimpleSerializable):
- """
- Creates a color picker for user to select a color as string input.
- Preprocessing: passes selected color value as a {str} into the function.
- Postprocessing: expects a {str} returned from function and sets color picker value to it.
- Examples-format: a {str} with a hexadecimal representation of a color, e.g. "#ff0000" for red.
- Demos: color_picker, color_generator
- """
-
- def __init__(
- self,
- value: str | Callable | None = None,
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- interactive: bool | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: default color to show in the color picker, as a hex string (e.g. "#ff0000"). If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.cleared_value = "#000000"
- self.test_input = value
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- interactive=interactive,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- interactive: bool | None = None,
- ):
- updated_config = {
- "value": value,
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "__type__": "update",
- }
- return IOComponent.add_interactive_to_config(updated_config, interactive)
-
- def preprocess(self, x: str | None) -> str | None:
- """
- Any preprocessing needed to be performed on function input.
- Parameters:
- x: text
- Returns:
- text
- """
- if x is None:
- return None
- else:
- return str(x)
-
- def generate_sample(self) -> str:
- return "#000000"
-
- def postprocess(self, y: str | None) -> str | None:
- """
- Any postprocessing needed to be performed on function output.
- Parameters:
- y: text
- Returns:
- text
- """
- if y is None:
- return None
- else:
- return str(y)
-
-
-############################
-# Only Output Components
-############################
-
-
-@document("change", "style")
-class Label(Changeable, IOComponent, JSONSerializable):
- """
- Displays a classification label, along with confidence scores of top categories, if provided.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {Dict[str, float]} of classes and confidences, or {str} with just the class or an {int}/{float} for regression outputs, or a {str} path to a .json file containing a json dictionary in the structure produced by Label.postprocess().
-
- Demos: main_note, titanic_survival
- Guides: Gradio_and_ONNX_on_Hugging_Face, image_classification_in_pytorch, image_classification_in_tensorflow, image_classification_with_vision_transformers, building_a_pictionary_app
- """
-
- CONFIDENCES_KEY = "confidences"
-
- def __init__(
- self,
- value: Dict[str, float] | str | float | Callable | None = None,
- *,
- num_top_classes: int | None = None,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- color: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default value to show in the component. If a str or number is provided, simply displays the string or number. If a {Dict[str, float]} of classes and confidences is provided, displays the top class on top and the `num_top_classes` below, along with their confidence bars. If callable, the function will be called whenever the app loads to set the initial value of the component.
- num_top_classes: number of most confident classes to show.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- color: The background color of the label (either a valid css color name or hexadecimal string).
- """
- self.num_top_classes = num_top_classes
- self.color = color
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "num_top_classes": self.num_top_classes,
- "value": self.value,
- "color": self.color,
- **IOComponent.get_config(self),
- }
-
- def postprocess(self, y: Dict[str, float] | str | float | None) -> Dict | None:
- """
- Parameters:
- y: a dictionary mapping labels to confidence value, or just a string/numerical label by itself
- Returns:
- Object with key 'label' representing primary label, and key 'confidences' representing a list of label-confidence pairs
- """
- if y is None or y == {}:
- return None
- if isinstance(y, str) and y.endswith(".json") and Path(y).exists():
- return self.serialize(y)
- if isinstance(y, (str, float, int)):
- return {"label": str(y)}
- if isinstance(y, dict):
- if "confidences" in y and isinstance(y["confidences"], dict):
- y = y["confidences"]
- y = {c["label"]: c["confidence"] for c in y}
- sorted_pred = sorted(y.items(), key=operator.itemgetter(1), reverse=True)
- if self.num_top_classes is not None:
- sorted_pred = sorted_pred[: self.num_top_classes]
- return {
- "label": sorted_pred[0][0],
- "confidences": [
- {"label": pred[0], "confidence": pred[1]} for pred in sorted_pred
- ],
- }
- raise ValueError(
- "The `Label` output interface expects one of: a string label, or an int label, a "
- "float label, or a dictionary whose keys are labels and values are confidences. "
- "Instead, got a {}".format(type(y))
- )
-
- @staticmethod
- def update(
- value: Dict[str, float]
- | str
- | float
- | Literal[_Keywords.NO_VALUE]
- | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- color: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- ):
- # If color is not specified (NO_VALUE) map it to None so that
- # it gets filtered out in postprocess. This will mean the color
- # will not be updated in the front-end
- if color is _Keywords.NO_VALUE:
- color = None
- # If the color was specified by the developer as None,
- # map it so that the color is updated to be transparent,
- # i.e. the default state with no background.
- elif color is None:
- color = "transparent"
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "color": color,
- "__type__": "update",
- }
- return updated_config
-
- def style(
- self,
- *,
- container: bool | None = None,
- ):
- """
- This method can be used to change the appearance of the label component.
- Parameters:
- container: If True, will add a container to the label - providing some extra padding around the border.
- """
- return Component.style(self, container=container)
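-
-
-# Illustrative sketch (not part of the original module): how Label.postprocess
-# above turns a {class: confidence} dict into the payload sent to the frontend.
-# The helper name is hypothetical.
-def _label_payload_demo(confidences, num_top_classes=None):
-    import operator
-
-    ranked = sorted(confidences.items(), key=operator.itemgetter(1), reverse=True)
-    if num_top_classes is not None:
-        ranked = ranked[:num_top_classes]
-    return {
-        "label": ranked[0][0],
-        "confidences": [{"label": k, "confidence": v} for k, v in ranked],
-    }
-
-
-# e.g. _label_payload_demo({"cat": 0.7, "dog": 0.3})
-# -> {"label": "cat", "confidences": [{"label": "cat", "confidence": 0.7},
-#                                     {"label": "dog", "confidence": 0.3}]}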
-
-
-@document("change", "style")
-class HighlightedText(Changeable, IOComponent, JSONSerializable):
- """
- Displays text that contains spans that are highlighted by category or numerical value.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {List[Tuple[str, float | str]]} consisting of spans of text and their associated labels, or a {Dict} with two keys: (1) "text", whose value is the complete text, and (2) "entities", which is a list of dictionaries, each of which has the keys: "entity" (the entity label), "start" (the character index where the label starts), and "end" (the character index where the label ends). Entities should not overlap.
-
- Demos: diff_texts, text_analysis
- Guides: named_entity_recognition
- """
-
- def __init__(
- self,
- value: List[Tuple[str, str | float | None]] | Dict | Callable | None = None,
- *,
- color_map: Dict[str, str]
- | None = None, # Parameter moved to HighlightedText.style()
- show_legend: bool = False,
- combine_adjacent: bool = False,
- adjacent_separator: str = "",
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default value to show. If callable, the function will be called whenever the app loads to set the initial value of the component.
- show_legend: whether to show span categories in a separate legend or inline.
- combine_adjacent: If True, will merge the labels of adjacent tokens belonging to the same category.
- adjacent_separator: Specifies the separator to be used between tokens if combine_adjacent is True.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.color_map = color_map
- if color_map is not None:
- warnings.warn(
- "The 'color_map' parameter has been moved from the constructor to `HighlightedText.style()` ",
- )
- self.show_legend = show_legend
- self.combine_adjacent = combine_adjacent
- self.adjacent_separator = adjacent_separator
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "color_map": self.color_map,
- "show_legend": self.show_legend,
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: List[Tuple[str, str | float | None]]
- | Dict
- | Literal[_Keywords.NO_VALUE]
- | None,
- color_map: Dict[str, str] | None = None,
- show_legend: bool | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "color_map": color_map,
- "show_legend": show_legend,
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def postprocess(
- self, y: List[Tuple[str, str | float | None]] | Dict | None
- ) -> List[Tuple[str, str | float | None]] | None:
- """
- Parameters:
- y: List of (word, category) tuples
- Returns:
- List of (word, category) tuples
- """
- if y is None:
- return None
- if isinstance(y, dict):
- try:
- text = y["text"]
- entities = y["entities"]
- except KeyError:
- raise ValueError(
- "Expected a dictionary with keys 'text' and 'entities' for the value of the HighlightedText component."
- )
- if len(entities) == 0:
- y = [(text, None)]
- else:
- list_format = []
- index = 0
- entities = sorted(entities, key=lambda x: x["start"])
- for entity in entities:
- list_format.append((text[index : entity["start"]], None))
- list_format.append(
- (text[entity["start"] : entity["end"]], entity["entity"])
- )
- index = entity["end"]
- list_format.append((text[index:], None))
- y = list_format
- if self.combine_adjacent:
- output = []
- running_text, running_category = None, None
- for text, category in y:
- if running_text is None:
- running_text = text
- running_category = category
- elif category == running_category:
- running_text += self.adjacent_separator + text
- elif not text:
- # Skip fully empty item, these get added in processing
- # of dictionaries.
- pass
- else:
- output.append((running_text, running_category))
- running_text = text
- running_category = category
- if running_text is not None:
- output.append((running_text, running_category))
- return output
- else:
- return y
-
- def style(
- self,
- *,
- color_map: Dict[str, str] | None = None,
- container: bool | None = None,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the HighlightedText component.
- Parameters:
- color_map: Map between category and respective colors.
- container: If True, will place the component in a container - providing some extra padding around the border.
- """
- if color_map is not None:
- self._style["color_map"] = color_map
-
- return Component.style(self, container=container, **kwargs)
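-
-
-# Illustrative sketch (not part of the original module): the dict-to-span
-# conversion performed by HighlightedText.postprocess above. The helper name
-# and the example entities are hypothetical.
-def _entities_to_spans_demo(text, entities):
-    spans, index = [], 0
-    for entity in sorted(entities, key=lambda e: e["start"]):
-        spans.append((text[index : entity["start"]], None))
-        spans.append((text[entity["start"] : entity["end"]], entity["entity"]))
-        index = entity["end"]
-    spans.append((text[index:], None))
-    return spans
-
-
-# e.g. _entities_to_spans_demo("Alice met Bob", [{"entity": "PER", "start": 0, "end": 5}])
-# -> [("", None), ("Alice", "PER"), (" met Bob", None)]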
-
-
-@document("change", "style")
-class JSON(Changeable, IOComponent, JSONSerializable):
- """
- Used to display arbitrary JSON output prettily.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a valid JSON {str} -- or a {list} or {dict} that is JSON serializable.
-
- Demos: zip_to_json, blocks_xray
- """
-
- def __init__(
- self,
- value: str | Callable | None = None,
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default value. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- interactive: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def postprocess(self, y: Dict | List | str | None) -> Dict | List | None:
- """
- Parameters:
- y: JSON output
- Returns:
- JSON output
- """
- if y is None:
- return None
- if isinstance(y, str):
- return json.loads(y)
- else:
- return y
-
- def style(self, *, container: bool | None = None, **kwargs):
- """
- This method can be used to change the appearance of the JSON component.
- Parameters:
- container: If True, will place the JSON in a container - providing some extra padding around the border.
- """
- return Component.style(self, container=container, **kwargs)
-
-
-@document("change")
-class HTML(Changeable, IOComponent, SimpleSerializable):
- """
- Used to display arbitrary HTML output.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a valid HTML {str}.
-
- Demos: text_analysis
- Guides: key_features
- """
-
- def __init__(
- self,
- value: str | Callable = "",
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default value. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def style(self):
- return self
-
-
-@document("style")
-class Gallery(IOComponent, TempFileManager, FileSerializable):
- """
- Used to display a list of images as a gallery that can be scrolled through.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a list of images in any format, {List[numpy.array | PIL.Image | str]}, or a {List} of (image, {str} caption) tuples and displays them.
-
- Demos: fake_gan
- """
-
- def __init__(
- self,
- value: List[np.ndarray | _Image.Image | str] | Callable | None = None,
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: List of images to display in the gallery by default. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- TempFileManager.__init__(self)
- super().__init__(
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def get_config(self):
- return {
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- def postprocess(
- self,
- y: List[np.ndarray | _Image.Image | str]
- | List[Tuple[np.ndarray | _Image.Image | str, str]]
- | None,
- ) -> List[str]:
- """
- Parameters:
- y: list of images, or list of (image, caption) tuples
- Returns:
- list of string file paths to images in temp directory
- """
- if y is None:
- return []
- output = []
- for img in y:
- caption = None
- if isinstance(img, tuple) or isinstance(img, list):
- img, caption = img
- if isinstance(img, np.ndarray):
- file = processing_utils.save_array_to_file(img)
- file_path = str(Path(file.name).resolve())
- self.temp_files.add(file_path)
- elif isinstance(img, _Image.Image):
- file = processing_utils.save_pil_to_file(img)
- file_path = str(Path(file.name).resolve())
- self.temp_files.add(file_path)
- elif isinstance(img, str):
- if utils.validate_url(img):
- file_path = img
- else:
- file_path = self.make_temp_copy_if_needed(img)
- else:
- raise ValueError(f"Cannot process type as image: {type(img)}")
-
- if caption is not None:
- output.append(
- [{"name": file_path, "data": None, "is_file": True}, caption]
- )
- else:
- output.append({"name": file_path, "data": None, "is_file": True})
-
- return output
-
- def style(
- self,
- *,
- grid: int | Tuple | None = None,
- height: str | None = None,
- container: bool | None = None,
- **kwargs,
- ):
- """
- This method can be used to change the appearance of the gallery component.
- Parameters:
- grid: Represents the number of images that should be shown in one row, for each of the six standard screen sizes (<576px, <768px, <992px, <1200px, <1400px, >1400px). If fewer than 6 are given, the last will be used for all subsequent breakpoints.
- height: Height of the gallery.
- container: If True, will place gallery in a container - providing some extra padding around the border.
- """
- if grid is not None:
- self._style["grid"] = grid
- if height is not None:
- self._style["height"] = height
-
- return Component.style(self, container=container, **kwargs)
-
- def deserialize(
- self, x: Any, save_dir: str = "", encryption_key: bytes | None = None
- ) -> None | str:
- if x is None:
- return None
- gallery_path = Path(save_dir) / str(uuid.uuid4())
- gallery_path.mkdir(exist_ok=True, parents=True)
- captions = {}
- for img_data in x:
- if isinstance(img_data, list) or isinstance(img_data, tuple):
- img_data, caption = img_data
- else:
- caption = None
- name = FileSerializable.deserialize(self, img_data, gallery_path)
- captions[name] = caption
- captions_file = gallery_path / "captions.json"
- with captions_file.open("w") as captions_json:
- json.dump(captions, captions_json)
- return str(gallery_path.resolve())
-
- def serialize(self, x: Any, load_dir: str = "", called_directly: bool = False):
- files = []
- captions_file = Path(x) / "captions.json"
- with captions_file.open("r") as captions_json:
- captions = json.load(captions_json)
- for file_name, caption in captions.items():
- img = FileSerializable.serialize(self, file_name)
- files.append([img, caption])
- return files
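-
-
-# Illustrative sketch (not part of the original module): reading back the
-# captions.json file written by Gallery.deserialize above. The directory
-# argument is hypothetical.
-def _gallery_captions_demo(gallery_dir):
-    import json
-    from pathlib import Path
-
-    with (Path(gallery_dir) / "captions.json").open() as captions_json:
-        captions = json.load(captions_json)  # {image_path: caption_or_None}
-    return list(captions.items())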
-
-
-class Carousel(IOComponent, Changeable, SimpleSerializable):
- """
- Deprecated Component
- """
-
- def __init__(
- self,
- *args,
- **kwargs,
- ):
- raise DeprecationWarning(
- "The Carousel component is deprecated. Please consider using the Gallery "
- "component, which can be used to display images (and optional captions).",
- )
-
-
-@document("change", "style")
-class Chatbot(Changeable, IOComponent, JSONSerializable):
- """
- Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {List[Tuple[str, str]]}, a list of tuples with user messages and responses as strings. Each string may use Markdown, which is rendered to HTML for display.
-
- Demos: chatbot_demo
- """
-
- def __init__(
- self,
- value: List[Tuple[str, str]] | Callable | None = None,
- color_map: Dict[str, str] | None = None, # Parameter moved to Chatbot.style()
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Default value to show in chatbot. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- if color_map is not None:
- warnings.warn(
- "The 'color_map' parameter has been moved from the constructor to `Chatbot.style()` ",
- )
- self.color_map = color_map
- self.md = MarkdownIt()
-
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "value": self.value,
- "color_map": self.color_map,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- color_map: Tuple[str, str] | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "color_map": color_map,
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def postprocess(self, y: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- """
- Parameters:
- y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
- Returns:
- List of tuples representing the message and response. Each message and response will be a string of HTML.
- """
- if y is None:
- return []
- for i, (message, response) in enumerate(y):
- y[i] = (self.md.render(message), self.md.render(response))
- return y
-
- def style(self, *, color_map: Tuple[str, str] | None = None, **kwargs):
- """
- This method can be used to change the appearance of the Chatbot component.
- Parameters:
- color_map: Tuple containing colors to apply to user and response chat bubbles.
- Returns:
-
- """
- if color_map is not None:
- self._style["color_map"] = color_map
-
- return Component.style(
- self,
- **kwargs,
- )
-
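-# A minimal usage sketch for the Chatbot component above, assuming the gradio 3.x
-# Blocks API; the `respond` function and component labels are illustrative only.
-#
-# import gradio as gr
-#
-# def respond(message, history):
-#     history = history + [(message, "You said: " + message)]
-#     return history, ""
-#
-# with gr.Blocks() as demo:
-#     chatbot = gr.Chatbot()
-#     msg = gr.Textbox(label="Your message")
-#     msg.submit(respond, [msg, chatbot], [chatbot, msg])
-#
-# demo.launch()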
-
-@document("change", "edit", "clear", "style")
-class Model3D(
- Changeable, Editable, Clearable, IOComponent, FileSerializable, TempFileManager
-):
- """
- Component allows users to upload or view 3D Model files (.obj, .glb, or .gltf).
- Preprocessing: This component passes the uploaded file as a {str} filepath.
- Postprocessing: expects function to return a {str} path to a file of type (.obj, .glb, or .gltf)
-
- Demos: model3D
- Guides: how_to_use_3D_model_component
- """
-
- def __init__(
- self,
- value: str | Callable | None = None,
- *,
- clear_color: List[float] | None = None,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: path to (.obj, .glb, or .gltf) file to show in model3D viewer. If callable, the function will be called whenever the app loads to set the initial value of the component.
- clear_color: background color of scene
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.clear_color = clear_color or [0.2, 0.2, 0.2, 1.0]
- TempFileManager.__init__(self)
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "clearColor": self.clear_color,
- "value": self.value,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def preprocess(self, x: Dict[str, str] | None) -> str | None:
- """
- Parameters:
- x: JSON object with filename as 'name' property and base64 data as 'data' property
- Returns:
- string file path to temporary file with the 3D image model
- """
- if x is None:
- return x
- file_name, file_data, is_file = (
- x["name"],
- x["data"],
- x.get("is_file", False),
- )
- if is_file:
- temp_file_path = self.make_temp_copy_if_needed(file_name)
- else:
- temp_file = processing_utils.decode_base64_to_file(
- file_data, file_path=file_name
- )
- temp_file_path = temp_file.name
-
- return temp_file_path
-
- def generate_sample(self):
- return media_data.BASE64_MODEL3D
-
- def postprocess(self, y: str | None) -> Dict[str, str] | None:
- """
- Parameters:
- y: path to the model
- Returns:
- file name mapped to base64 url data
- """
- if y is None:
- return y
- data = {
- "name": self.make_temp_copy_if_needed(y),
- "data": None,
- "is_file": True,
- }
- return data
-
- def style(self, **kwargs):
- """
- This method can be used to change the appearance of the Model3D component.
- """
- return Component.style(
- self,
- **kwargs,
- )
-
- def as_example(self, input_data: str | None) -> str:
- return Path(input_data).name if input_data else ""
-
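-# A minimal sketch of how Model3D round-trips data, assuming the gradio 3.x API:
-# preprocess hands the wrapped function a temp filepath (str), and postprocess
-# wraps a returned filepath as {"name": ..., "data": None, "is_file": True}.
-# The `echo_model` function is illustrative only.
-#
-# import gradio as gr
-#
-# def echo_model(filepath):      # str path produced by Model3D.preprocess
-#     return filepath            # str path consumed by Model3D.postprocess
-#
-# demo = gr.Interface(echo_model, gr.Model3D(), gr.Model3D())
-# demo.launch()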
-
-@document("change", "clear")
-class Plot(Changeable, Clearable, IOComponent, JSONSerializable):
- """
- Used to display various kinds of plots (matplotlib, plotly, or bokeh are supported)
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects either a {matplotlib.figure.Figure}, a {plotly.graph_objects._figure.Figure}, or a {dict} corresponding to a bokeh plot (json_item format)
-
- Demos: altair_plot, outbreak_forecast, blocks_kinematics, stock_forecast, map_airbnb
- Guides: plot_component_for_maps
- """
-
- def __init__(
- self,
- value: Callable | None | pd.DataFrame = None,
- *,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Optionally, supply a default plot object to display, must be a matplotlib, plotly, altair, or bokeh figure, or a callable. If callable, the function will be called whenever the app loads to set the initial value of the component.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {"value": self.value, **IOComponent.get_config(self)}
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def postprocess(self, y) -> Dict[str, str] | None:
- """
- Parameters:
- y: plot data
- Returns:
- plot type mapped to plot base64 data
- """
- if y is None:
- return None
- if isinstance(y, (ModuleType, matplotlib.figure.Figure)):
- dtype = "matplotlib"
- out_y = processing_utils.encode_plot_to_base64(y)
- elif isinstance(y, dict):
- dtype = "bokeh"
- out_y = json.dumps(y)
- else:
- is_altair = "altair" in y.__module__
- if is_altair:
- dtype = "altair"
- else:
- dtype = "plotly"
- out_y = y.to_json()
- return {"type": dtype, "plot": out_y}
-
- def style(self, container: bool | None = None):
- return Component.style(
- self,
- container=container,
- )
-
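-# A minimal sketch of the matplotlib branch of Plot.postprocess above, assuming
-# gradio 3.x and matplotlib; `make_plot` is illustrative only.
-#
-# import gradio as gr
-# import matplotlib.pyplot as plt
-#
-# def make_plot():
-#     fig, ax = plt.subplots()
-#     ax.plot([1, 2, 3], [1, 4, 9])
-#     return fig                 # encoded to base64 via the "matplotlib" branch
-#
-# demo = gr.Interface(make_plot, inputs=None, outputs=gr.Plot())
-# demo.launch()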
-
-class AltairPlot:
- @staticmethod
- def create_legend(position, title):
- if position == "none":
- legend = None
- else:
- position = {"orient": position} if position else {}
- legend = {"title": title, **position}
-
- return legend
-
- @staticmethod
- def create_scale(limit):
- return alt.Scale(domain=limit) if limit else alt.Undefined
-
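-# The helpers above return plain Vega/Altair fragments; for example (values are
-# illustrative):
-#
-# AltairPlot.create_legend(position="bottom", title="Species")
-#     # -> {"title": "Species", "orient": "bottom"}
-# AltairPlot.create_legend(position="none", title="Species")
-#     # -> None (legend omitted)
-# AltairPlot.create_scale([0, 10])
-#     # -> alt.Scale(domain=[0, 10])
-# AltairPlot.create_scale(None)
-#     # -> alt.Undefined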
-
-@document("change", "clear")
-class ScatterPlot(Plot):
- """
- Create a scatter plot.
-
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a pandas dataframe with the data to plot.
-
- Demos: native_plots
- Guides: creating_a_dashboard_from_bigquery_data
- """
-
- def __init__(
- self,
- value: pd.DataFrame | Callable | None = None,
- x: str | None = None,
- y: str | None = None,
- *,
- color: str | None = None,
- size: str | None = None,
- shape: str | None = None,
- title: str | None = None,
- tooltip: List[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- color_legend_title: str | None = None,
- size_legend_title: str | None = None,
- shape_legend_title: str | None = None,
- color_legend_position: str | None = None,
- size_legend_position: str | None = None,
- shape_legend_position: str | None = None,
- height: int | None = None,
- width: int | None = None,
- x_lim: List[int | float] | None = None,
- y_lim: List[int | float] | None = None,
- caption: str | None = None,
- interactive: bool | None = True,
- label: str | None = None,
- every: float | None = None,
- show_label: bool = True,
- visible: bool = True,
- elem_id: str | None = None,
- ):
- """
- Parameters:
- value: The pandas dataframe containing the data to display in a scatter plot, or a callable. If callable, the function will be called whenever the app loads to set the initial value of the component.
- x: Column corresponding to the x axis.
- y: Column corresponding to the y axis.
- color: The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark colors.
- size: The column used to determine the point size. Should contain numeric data so that gradio can map the data to the point size.
- shape: The column used to determine the point shape. Should contain categorical data. Gradio will map each unique value to a different shape.
- title: The title to display on top of the chart.
- tooltip: The column (or list of columns) to display on the tooltip when a user hovers a point on the plot.
- x_title: The title given to the x axis. By default, uses the value of the x parameter.
- y_title: The title given to the y axis. By default, uses the value of the y parameter.
- color_legend_title: The title given to the color legend. By default, uses the value of color parameter.
- size_legend_title: The title given to the size legend. By default, uses the value of the size parameter.
- shape_legend_title: The title given to the shape legend. By default, uses the value of the shape parameter.
- color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- size_legend_position: The position of the size legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- shape_legend_position: The position of the shape legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- height: The height of the plot in pixels.
- width: The width of the plot in pixels.
- x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max].
- y_lim: A tuple or list containing the limits for the y-axis, specified as [y_min, y_max].
- caption: The (optional) caption to display below the plot.
- interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.
- label: The (optional) label to display on the top left corner of the plot.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: Whether the label should be displayed.
- visible: Whether the plot should be visible.
- elem_id: Unique id used for custom CSS targeting.
- """
- self.x = x
- self.y = y
- self.color = color
- self.size = size
- self.shape = shape
- self.tooltip = tooltip
- self.title = title
- self.x_title = x_title
- self.y_title = y_title
- self.color_legend_title = color_legend_title
- self.color_legend_position = color_legend_position
- self.size_legend_title = size_legend_title
- self.size_legend_position = size_legend_position
- self.shape_legend_title = shape_legend_title
- self.shape_legend_position = shape_legend_position
- self.caption = caption
- self.interactive_chart = interactive
- self.width = width
- self.height = height
- self.x_lim = x_lim
- self.y_lim = y_lim
- super().__init__(
- value=value,
- label=label,
- every=every,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- )
-
- def get_config(self):
- config = super().get_config()
- config["caption"] = self.caption
- return config
-
- def get_block_name(self) -> str:
- return "plot"
-
- @staticmethod
- def update(
- value: DataFrame | Dict | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE,
- x: str | None = None,
- y: str | None = None,
- color: str | None = None,
- size: str | None = None,
- shape: str | None = None,
- title: str | None = None,
- tooltip: List[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- color_legend_title: str | None = None,
- size_legend_title: str | None = None,
- shape_legend_title: str | None = None,
- color_legend_position: str | None = None,
- size_legend_position: str | None = None,
- shape_legend_position: str | None = None,
- height: int | None = None,
- width: int | None = None,
- x_lim: List[int | float] | None = None,
- y_lim: List[int | float] | None = None,
- interactive: bool | None = None,
- caption: str | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- ):
- """Update an existing plot component.
-
- If updating any of the plot properties (color, size, etc) the value, x, and y parameters must be specified.
-
- Parameters:
- value: The pandas dataframe containing the data to display in a scatter plot.
- x: Column corresponding to the x axis.
- y: Column corresponding to the y axis.
- color: The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark colors.
- size: The column used to determine the point size. Should contain numeric data so that gradio can map the data to the point size.
- shape: The column used to determine the point shape. Should contain categorical data. Gradio will map each unique value to a different shape.
- title: The title to display on top of the chart.
- tooltip: The column (or list of columns) to display on the tooltip when a user hovers a point on the plot.
- x_title: The title given to the x axis. By default, uses the value of the x parameter.
- y_title: The title given to the y axis. By default, uses the value of the y parameter.
- color_legend_title: The title given to the color legend. By default, uses the value of color parameter.
- size_legend_title: The title given to the size legend. By default, uses the value of the size parameter.
- shape_legend_title: The title given to the shape legend. By default, uses the value of the shape parameter.
- color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- size_legend_position: The position of the size legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- shape_legend_position: The position of the shape legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- height: The height of the plot in pixels.
- width: The width of the plot in pixels.
- x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max].
- y_lim: A tuple or list containing the limits for the y-axis, specified as [y_min, y_max].
- interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.
- caption: The (optional) caption to display below the plot.
- label: The (optional) label to display in the top left corner of the plot.
- show_label: Whether the label should be displayed.
- visible: Whether the plot should be visible.
- """
- properties = [
- x,
- y,
- color,
- size,
- shape,
- title,
- tooltip,
- x_title,
- y_title,
- color_legend_title,
- size_legend_title,
- shape_legend_title,
- color_legend_position,
- size_legend_position,
- shape_legend_position,
- interactive,
- height,
- width,
- x_lim,
- y_lim,
- ]
- if any(properties):
- if not isinstance(value, pd.DataFrame):
- raise ValueError(
- "In order to update plot properties the value parameter "
- "must be provided, and it must be a Dataframe. Please pass a value "
- "parameter to gr.ScatterPlot.update."
- )
- if x is None or y is None:
- raise ValueError(
- "In order to update plot properties, the x and y axis data "
- "must be specified. Please pass valid values for x an y to "
- "gr.ScatterPlot.update."
- )
- chart = ScatterPlot.create_plot(value, *properties)
- value = {"type": "altair", "plot": chart.to_json(), "chart": "scatter"}
-
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "caption": caption,
- "__type__": "update",
- }
- return updated_config
-
- @staticmethod
- def create_plot(
- value: pd.DataFrame,
- x: str,
- y: str,
- color: str | None = None,
- size: str | None = None,
- shape: str | None = None,
- title: str | None = None,
- tooltip: List[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- color_legend_title: str | None = None,
- size_legend_title: str | None = None,
- shape_legend_title: str | None = None,
- color_legend_position: str | None = None,
- size_legend_position: str | None = None,
- shape_legend_position: str | None = None,
- height: int | None = None,
- width: int | None = None,
- x_lim: List[int | float] | None = None,
- y_lim: List[int | float] | None = None,
- interactive: bool | None = True,
- ):
- """Helper for creating the scatter plot."""
- interactive = True if interactive is None else interactive
- encodings = dict(
- x=alt.X(
- x, # type: ignore
- title=x_title or x, # type: ignore
- scale=AltairPlot.create_scale(x_lim), # type: ignore
- ), # ignore: type
- y=alt.Y(
- y, # type: ignore
- title=y_title or y, # type: ignore
- scale=AltairPlot.create_scale(y_lim), # type: ignore
- ),
- )
- properties = {}
- if title:
- properties["title"] = title
- if height:
- properties["height"] = height
- if width:
- properties["width"] = width
- if color:
- if is_numeric_dtype(value[color]):
- domain = [value[color].min(), value[color].max()]
- range_ = [0, 1]
- type_ = "quantitative"
- else:
- domain = value[color].unique().tolist()
- range_ = list(range(len(domain)))
- type_ = "nominal"
-
- encodings["color"] = {
- "field": color,
- "type": type_,
- "legend": AltairPlot.create_legend(
- position=color_legend_position, title=color_legend_title or color
- ),
- "scale": {"domain": domain, "range": range_},
- }
- if tooltip:
- encodings["tooltip"] = tooltip
- if size:
- encodings["size"] = {
- "field": size,
- "type": "quantitative" if is_numeric_dtype(value[size]) else "nominal",
- "legend": AltairPlot.create_legend(
- position=size_legend_position, title=size_legend_title or size
- ),
- }
- if shape:
- encodings["shape"] = {
- "field": shape,
- "type": "quantitative" if is_numeric_dtype(value[shape]) else "nominal",
- "legend": AltairPlot.create_legend(
- position=shape_legend_position, title=shape_legend_title or shape
- ),
- }
- chart = (
- alt.Chart(value) # type: ignore
- .mark_point(clip=True) # type: ignore
- .encode(**encodings)
- .properties(background="transparent", **properties)
- )
- if interactive:
- chart = chart.interactive()
-
- return chart
-
- def postprocess(self, y: pd.DataFrame | Dict | None) -> Dict[str, str] | None:
- # if None or update
- if y is None or isinstance(y, Dict):
- return y
- if self.x is None or self.y is None:
- raise ValueError("No value provided for required parameters `x` and `y`.")
- chart = self.create_plot(
- value=y,
- x=self.x,
- y=self.y,
- color=self.color,
- size=self.size,
- shape=self.shape,
- title=self.title,
- tooltip=self.tooltip,
- x_title=self.x_title,
- y_title=self.y_title,
- color_legend_title=self.color_legend_title,
- size_legend_title=self.size_legend_title,
- shape_legend_title=self.shape_legend_title,
- color_legend_position=self.color_legend_position,
- size_legend_position=self.size_legend_position,
- shape_legend_position=self.shape_legend_position,
- interactive=self.interactive_chart,
- height=self.height,
- width=self.width,
- x_lim=self.x_lim,
- y_lim=self.y_lim,
- )
-
- return {"type": "altair", "plot": chart.to_json(), "chart": "scatter"}
-
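-# A minimal usage sketch for ScatterPlot, assuming gradio 3.x and pandas; the
-# dataframe and column names are illustrative only.
-#
-# import gradio as gr
-# import pandas as pd
-#
-# df = pd.DataFrame({"weight": [55, 70, 82], "height": [160, 175, 188]})
-#
-# with gr.Blocks() as demo:
-#     gr.ScatterPlot(value=df, x="weight", y="height", tooltip=["weight", "height"])
-#
-# demo.launch()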
-
-@document("change", "clear")
-class LinePlot(Plot):
- """
- Create a line plot.
-
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a pandas dataframe with the data to plot.
-
- Demos: native_plots, live_dashboard
- """
-
- def __init__(
- self,
- value: pd.DataFrame | Callable | None = None,
- x: str | None = None,
- y: str | None = None,
- *,
- color: str | None = None,
- stroke_dash: str | None = None,
- overlay_point: bool | None = None,
- title: str | None = None,
- tooltip: List[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- color_legend_title: str | None = None,
- stroke_dash_legend_title: str | None = None,
- color_legend_position: str | None = None,
- stroke_dash_legend_position: str | None = None,
- height: int | None = None,
- width: int | None = None,
- x_lim: List[int] | None = None,
- y_lim: List[int] | None = None,
- caption: str | None = None,
- interactive: bool | None = True,
- label: str | None = None,
- show_label: bool = True,
- every: float | None = None,
- visible: bool = True,
- elem_id: str | None = None,
- ):
- """
- Parameters:
- value: The pandas dataframe containing the data to display in a line plot.
- x: Column corresponding to the x axis.
- y: Column corresponding to the y axis.
- color: The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark colors.
- stroke_dash: The column to determine the symbol used to draw the line, e.g. dashed lines, dashed lines with points.
- overlay_point: Whether to draw a point on the line for each (x, y) coordinate pair.
- title: The title to display on top of the chart.
- tooltip: The column (or list of columns) to display on the tooltip when a user hovers a point on the plot.
- x_title: The title given to the x axis. By default, uses the value of the x parameter.
- y_title: The title given to the y axis. By default, uses the value of the y parameter.
- color_legend_title: The title given to the color legend. By default, uses the value of color parameter.
- stroke_dash_legend_title: The title given to the stroke_dash legend. By default, uses the value of the stroke_dash parameter.
- color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- stroke_dash_legend_position: The position of the stroke_dash legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation.
- height: The height of the plot in pixels.
- width: The width of the plot in pixels.
- x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max].
- y_lim: A tuple or list containing the limits for the y-axis, specified as [y_min, y_max].
- caption: The (optional) caption to display below the plot.
- interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.
- label: The (optional) label to display on the top left corner of the plot.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: Whether the label should be displayed.
- visible: Whether the plot should be visible.
- elem_id: Unique id used for custom CSS targeting.
- """
- self.x = x
- self.y = y
- self.color = color
- self.stroke_dash = stroke_dash
- self.tooltip = tooltip
- self.title = title
- self.x_title = x_title
- self.y_title = y_title
- self.color_legend_title = color_legend_title
- self.stroke_dash_legend_title = stroke_dash_legend_title
- self.color_legend_position = color_legend_position
- self.stroke_dash_legend_position = stroke_dash_legend_position
- self.overlay_point = overlay_point
- self.x_lim = x_lim
- self.y_lim = y_lim
- self.caption = caption
- self.interactive_chart = interactive
- self.width = width
- self.height = height
- super().__init__(
- value=value,
- label=label,
- show_label=show_label,
- visible=visible,
- elem_id=elem_id,
- every=every,
- )
-
- def get_config(self):
- config = super().get_config()
- config["caption"] = self.caption
- return config
-
- def get_block_name(self) -> str:
- return "plot"
-
- @staticmethod
- def update(
- value: pd.DataFrame | Dict | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE,
- x: str | None = None,
- y: str | None = None,
- color: str | None = None,
- stroke_dash: str | None = None,
- overlay_point: bool | None = None,
- title: str | None = None,
- tooltip: List[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- color_legend_title: str | None = None,
- stroke_dash_legend_title: str | None = None,
- color_legend_position: str | None = None,
- stroke_dash_legend_position: str | None = None,
- height: int | None = None,
- width: int | None = None,
- x_lim: List[int] | None = None,
- y_lim: List[int] | None = None,
- interactive: bool | None = None,
- caption: str | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- visible: bool | None = None,
- ):
- """Update an existing plot component.
-
- If updating any of the plot properties (color, size, etc) the value, x, and y parameters must be specified.
-
- Parameters:
- value: The pandas dataframe containing the data to display in a line plot.
- x: Column corresponding to the x axis.
- y: Column corresponding to the y axis.
- color: The column to determine the point color. If the column contains numeric data, gradio will interpolate the column data so that small values correspond to light colors and large values correspond to dark colors.
- stroke_dash: The column to determine the symbol used to draw the line, e.g. dashed lines, dashed lines with points.
- overlay_point: Whether to draw a point on the line for each (x, y) coordinate pair.
- title: The title to display on top of the chart.
- tooltip: The column (or list of columns) to display on the tooltip when a user hovers a point on the plot.
- x_title: The title given to the x axis. By default, uses the value of the x parameter.
- y_title: The title given to the y axis. By default, uses the value of the y parameter.
- color_legend_title: The title given to the color legend. By default, uses the value of color parameter.
- stroke_dash_legend_title: The title given to the stroke_dash legend. By default, uses the value of the stroke_dash parameter.
- color_legend_position: The position of the color legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation
- stroke_dash_legend_position: The position of the stroke_dash legend. If the string value 'none' is passed, this legend is omitted. For other valid position values see: https://vega.github.io/vega/docs/legends/#orientation
- height: The height of the plot in pixels.
- width: The width of the plot in pixels.
- x_lim: A tuple or list containing the limits for the x-axis, specified as [x_min, x_max].
- y_lim: A tuple or list containing the limits for the y-axis, specified as [y_min, y_max].
- caption: The (optional) caption to display below the plot.
- interactive: Whether users should be able to interact with the plot by panning or zooming with their mouse or trackpad.
- label: The (optional) label to display in the top left corner of the plot.
- show_label: Whether the label should be displayed.
- visible: Whether the plot should be visible.
- """
- properties = [
- x,
- y,
- color,
- stroke_dash,
- overlay_point,
- title,
- tooltip,
- x_title,
- y_title,
- color_legend_title,
- stroke_dash_legend_title,
- color_legend_position,
- stroke_dash_legend_position,
- height,
- width,
- x_lim,
- y_lim,
- interactive,
- ]
- if any(properties):
- if not isinstance(value, pd.DataFrame):
- raise ValueError(
- "In order to update plot properties the value parameter "
- "must be provided, and it must be a Dataframe. Please pass a value "
- "parameter to gr.LinePlot.update."
- )
- if x is None or y is None:
- raise ValueError(
- "In order to update plot properties, the x and y axis data "
- "must be specified. Please pass valid values for x an y to "
- "gr.LinePlot.update."
- )
- chart = LinePlot.create_plot(value, *properties)
- value = {"type": "altair", "plot": chart.to_json(), "chart": "line"}
-
- updated_config = {
- "label": label,
- "show_label": show_label,
- "visible": visible,
- "value": value,
- "caption": caption,
- "__type__": "update",
- }
- return updated_config
-
- @staticmethod
- def create_plot(
- value: pd.DataFrame,
- x: str,
- y: str,
- color: str | None = None,
- stroke_dash: str | None = None,
- overlay_point: bool | None = None,
- title: str | None = None,
- tooltip: List[str] | str | None = None,
- x_title: str | None = None,
- y_title: str | None = None,
- color_legend_title: str | None = None,
- stroke_dash_legend_title: str | None = None,
- color_legend_position: str | None = None,
- stroke_dash_legend_position: str | None = None,
- height: int | None = None,
- width: int | None = None,
- x_lim: List[int] | None = None,
- y_lim: List[int] | None = None,
- interactive: bool | None = None,
- ):
- """Helper for creating the scatter plot."""
- interactive = True if interactive is None else interactive
- encodings = dict(
- x=alt.X(
- x, # type: ignore
- title=x_title or x, # type: ignore
- scale=AltairPlot.create_scale(x_lim), # type: ignore
- ),
- y=alt.Y(
- y, # type: ignore
- title=y_title or y, # type: ignore
- scale=AltairPlot.create_scale(y_lim), # type: ignore
- ),
- )
- properties = {}
- if title:
- properties["title"] = title
- if height:
- properties["height"] = height
- if width:
- properties["width"] = width
-
- if color:
- domain = value[color].unique().tolist()
- range_ = list(range(len(domain)))
- encodings["color"] = {
- "field": color,
- "type": "nominal",
- "scale": {"domain": domain, "range": range_},
- "legend": AltairPlot.create_legend(
- position=color_legend_position, title=color_legend_title or color
- ),
- }
-
- highlight = None
- if interactive and any([color, stroke_dash]):
- highlight = alt.selection(
- type="single", # type: ignore
- on="mouseover",
- fields=[c for c in [color, stroke_dash] if c],
- nearest=True,
- )
-
- if stroke_dash:
- stroke_dash = {
- "field": stroke_dash, # type: ignore
- "legend": AltairPlot.create_legend( # type: ignore
- position=stroke_dash_legend_position, # type: ignore
- title=stroke_dash_legend_title or stroke_dash, # type: ignore
- ), # type: ignore
- } # type: ignore
- else:
- stroke_dash = alt.value(alt.Undefined) # type: ignore
-
- if tooltip:
- encodings["tooltip"] = tooltip
-
- chart = alt.Chart(value).encode(**encodings) # type: ignore
-
- points = chart.mark_point(clip=True).encode(
- opacity=alt.value(alt.Undefined) if overlay_point else alt.value(0),
- )
- lines = chart.mark_line(clip=True).encode(strokeDash=stroke_dash)
-
- if highlight:
- points = points.add_selection(highlight)
-
- lines = lines.encode(
- size=alt.condition(highlight, alt.value(4), alt.value(1)),
- )
-
- chart = (lines + points).properties(background="transparent", **properties)
- if interactive:
- chart = chart.interactive()
-
- return chart
-
- def postprocess(self, y: pd.DataFrame | Dict | None) -> Dict[str, str] | None:
- # if None or update
- if y is None or isinstance(y, Dict):
- return y
- if self.x is None or self.y is None:
- raise ValueError("No value provided for required parameters `x` and `y`.")
- chart = self.create_plot(
- value=y,
- x=self.x,
- y=self.y,
- color=self.color,
- overlay_point=self.overlay_point,
- title=self.title,
- tooltip=self.tooltip,
- x_title=self.x_title,
- y_title=self.y_title,
- color_legend_title=self.color_legend_title,
- color_legend_position=self.color_legend_position,
- stroke_dash_legend_title=self.stroke_dash_legend_title,
- stroke_dash_legend_position=self.stroke_dash_legend_position,
- x_lim=self.x_lim,
- y_lim=self.y_lim,
- stroke_dash=self.stroke_dash,
- interactive=self.interactive_chart,
- height=self.height,
- width=self.width,
- )
-
- return {"type": "altair", "plot": chart.to_json(), "chart": "line"}
-
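-# A minimal usage sketch for LinePlot, assuming gradio 3.x and pandas; the
-# dataframe and column names are illustrative only.
-#
-# import gradio as gr
-# import pandas as pd
-#
-# df = pd.DataFrame({"day": [1, 2, 3, 4], "temp": [20, 22, 19, 24]})
-#
-# with gr.Blocks() as demo:
-#     gr.LinePlot(value=df, x="day", y="temp", overlay_point=True)
-#
-# demo.launch()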
-
-@document("change")
-class Markdown(IOComponent, Changeable, SimpleSerializable):
- """
- Used to render arbitrary Markdown output. Can also render LaTeX enclosed by dollar signs.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a valid {str} that can be rendered as Markdown.
-
- Demos: blocks_hello, blocks_kinematics
- Guides: key_features
- """
-
- def __init__(
- self,
- value: str | Callable = "",
- *,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Value to show in Markdown component. If callable, the function will be called whenever the app loads to set the initial value of the component.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.md = (
- MarkdownIt()
- .use(dollarmath_plugin, renderer=utils.tex2svg, allow_digits=False)
- .enable("table")
- )
- IOComponent.__init__(
- self, visible=visible, elem_id=elem_id, value=value, **kwargs
- )
-
- def postprocess(self, y: str | None) -> str | None:
- """
- Parameters:
- y: markdown representation
- Returns:
- HTML rendering of markdown
- """
- if y is None:
- return None
- unindented_y = inspect.cleandoc(y)
- return self.md.render(unindented_y)
-
- def get_config(self):
- return {
- "value": self.value,
- **Component.get_config(self),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- visible: bool | None = None,
- ):
- updated_config = {
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def style(self):
- return self
-
- def as_example(self, input_data: str | None) -> str:
- postprocessed = self.postprocess(input_data)
- return postprocessed if postprocessed else ""
-
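-# A minimal usage sketch for Markdown, assuming gradio 3.x: postprocess renders
-# the string to HTML, including $...$ math via the dollarmath plugin.
-#
-# import gradio as gr
-#
-# with gr.Blocks() as demo:
-#     gr.Markdown("## Energy\n\nEinstein's relation is $e = mc^2$.")
-#
-# demo.launch()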
-
-############################
-# Special Components
-############################
-
-
-@document("click", "style")
-class Dataset(Clickable, Component):
- """
- Used to create an output widget for showing datasets. Used to render the examples
- box.
- Preprocessing: passes the selected sample either as a {list} of data (if type="values") or as an {int} index (if type="index")
- Postprocessing: expects a {list} of {lists} corresponding to the dataset data.
- """
-
- def __init__(
- self,
- *,
- label: str | None = None,
- components: List[IOComponent] | List[str],
- samples: List[List[Any]] | None = None,
- headers: List[str] | None = None,
- type: str = "values",
- samples_per_page: int = 10,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- components: Which component types to show in this dataset widget, can be passed in as a list of string names or Component instances. The following components are supported in a Dataset: Audio, Checkbox, CheckboxGroup, ColorPicker, Dataframe, Dropdown, File, HTML, Image, Markdown, Model3D, Number, Radio, Slider, Textbox, TimeSeries, Video
- samples: a nested list of samples. Each sublist within the outer list represents a data sample, and each element within the sublist represents a value for the corresponding component
- headers: Column headers in the Dataset widget, should be the same length as components. If not provided, inferred from component labels
- type: 'values' if clicking on a sample should pass the value of the sample, or "index" if it should pass the index of the sample
- samples_per_page: how many examples to show per page.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- Component.__init__(self, visible=visible, elem_id=elem_id, **kwargs)
- self.components = [get_component_instance(c, render=False) for c in components]
-
- # Narrow type to IOComponent
- assert all(
- [isinstance(c, IOComponent) for c in self.components]
- ), "All components in a `Dataset` must be subclasses of `IOComponent`"
- self.components = [c for c in self.components if isinstance(c, IOComponent)]
-
- self.samples = [[]] if samples is None else samples
- for example in self.samples:
- for i, (component, ex) in enumerate(zip(self.components, example)):
- example[i] = component.as_example(ex)
- self.type = type
- self.label = label
- if headers is not None:
- self.headers = headers
- elif all([c.label is None for c in self.components]):
- self.headers = []
- else:
- self.headers = [c.label or "" for c in self.components]
- self.samples_per_page = samples_per_page
-
- def get_config(self):
- return {
- "components": [component.get_block_name() for component in self.components],
- "headers": self.headers,
- "samples": self.samples,
- "type": self.type,
- "label": self.label,
- "samples_per_page": self.samples_per_page,
- **Component.get_config(self),
- }
-
- @staticmethod
- def update(
- samples: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- visible: bool | None = None,
- label: str | None = None,
- ):
- return {
- "samples": samples,
- "visible": visible,
- "label": label,
- "__type__": "update",
- }
-
- def preprocess(self, x: Any) -> Any:
- """
- Any preprocessing needed to be performed on function input.
- """
- if self.type == "index":
- return x
- elif self.type == "values":
- return self.samples[x]
-
- def postprocess(self, samples: List[List[Any]]) -> Dict:
- return {
- "samples": samples,
- "__type__": "update",
- }
-
- def style(self, **kwargs):
- """
- This method can be used to change the appearance of the Dataset component.
- """
- return Component.style(self, **kwargs)
-
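-# A minimal usage sketch for Dataset, assuming gradio 3.x; the component,
-# samples, and headers are illustrative only.
-#
-# import gradio as gr
-#
-# with gr.Blocks() as demo:
-#     textbox = gr.Textbox()
-#     gr.Dataset(components=[textbox], samples=[["hello"], ["bonjour"]],
-#                headers=["Greeting"])
-#
-# demo.launch()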
-
-@document()
-class Interpretation(Component):
- """
- Used to create an interpretation widget for a component.
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {dict} with keys "original" and "interpretation".
-
- Guides: custom_interpretations_with_blocks
- """
-
- def __init__(
- self,
- component: Component,
- *,
- visible: bool = True,
- elem_id: str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- component: Which component to show in the interpretation widget.
- visible: Whether or not the interpretation is visible.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- Component.__init__(self, visible=visible, elem_id=elem_id, **kwargs)
- self.component = component
-
- def get_config(self):
- return {
- "component": self.component.get_block_name(),
- "component_props": self.component.get_config(),
- }
-
- @staticmethod
- def update(
- value: Any | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
- visible: bool | None = None,
- ):
- return {
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
-
- def style(self):
- return self
-
-
-class StatusTracker(Component):
- def __init__(
- self,
- **kwargs,
- ):
- warnings.warn("The StatusTracker component is deprecated.")
-
-
-def component(cls_name: str) -> Component:
- obj = utils.component_or_layout_class(cls_name)()
- if isinstance(obj, BlockContext):
- raise ValueError(f"Invalid component: {obj.__class__}")
- return obj
-
-
-def get_component_instance(comp: str | dict | Component, render=True) -> Component:
- if isinstance(comp, str):
- component_obj = component(comp)
- if not (render):
- component_obj.unrender()
- return component_obj
- elif isinstance(comp, dict):
- name = comp.pop("name")
- component_cls = utils.component_or_layout_class(name)
- component_obj = component_cls(**comp)
- if isinstance(component_obj, BlockContext):
- raise ValueError(f"Invalid component: {name}")
- if not (render):
- component_obj.unrender()
- return component_obj
- elif isinstance(comp, Component):
- return comp
- else:
- raise ValueError(
- f"Component must provided as a `str` or `dict` or `Component` but is {comp}"
- )
-
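-# get_component_instance above accepts a component in any of three forms, e.g.
-# (illustrative calls):
-#
-# get_component_instance("textbox")                        # by registered name
-# get_component_instance({"name": "textbox", "lines": 4})  # by config dict
-# get_component_instance(Textbox())                        # an existing instance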
-
-Text = Textbox
-DataFrame = Dataframe
-Highlightedtext = HighlightedText
-Highlight = HighlightedText
-Checkboxgroup = CheckboxGroup
-TimeSeries = Timeseries
-Json = JSON
diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/events.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/events.py
deleted file mode 100644
index f0692c5d5f7e8f49d18fd25a7c5f60c9c033e418..0000000000000000000000000000000000000000
--- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/events.py
+++ /dev/null
@@ -1,723 +0,0 @@
-"""Contains all of the events that can be triggered in a gr.Blocks() app, with the exception
-of the on-page-load event, which is defined in gr.Blocks().load()."""
-
-from __future__ import annotations
-
-import warnings
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Set
-
-from gradio.blocks import Block
-from gradio.utils import get_cancel_function
-
-if TYPE_CHECKING: # Only import for type checking (is False at runtime).
- from gradio.components import Component, StatusTracker
-
-
-def set_cancel_events(
- block: Block, event_name: str, cancels: None | Dict[str, Any] | List[Dict[str, Any]]
-):
- if cancels:
- if not isinstance(cancels, list):
- cancels = [cancels]
- cancel_fn, fn_indices_to_cancel = get_cancel_function(cancels)
- block.set_event_trigger(
- event_name,
- cancel_fn,
- inputs=None,
- outputs=None,
- queue=False,
- preprocess=False,
- cancels=fn_indices_to_cancel,
- )
-
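-# A minimal sketch of how `cancels` flows through set_cancel_events above,
-# assuming gradio 3.x; `slow` and the button labels are illustrative only.
-# Passing the dependency dict returned by one listener as `cancels` to another
-# registers a hidden trigger that cancels the pending run.
-#
-# import time
-# import gradio as gr
-#
-# def slow():
-#     time.sleep(10)
-#     return "done"
-#
-# with gr.Blocks() as demo:
-#     out = gr.Textbox()
-#     start = gr.Button("Start")
-#     stop = gr.Button("Stop")
-#     click_event = start.click(slow, None, out)
-#     stop.click(None, None, None, cancels=[click_event])
-#
-# demo.queue().launch()   # cancellation requires the queue to be enabled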
-
-class EventListener(Block):
- pass
-
-
-class Changeable(EventListener):
- def change(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the component's input value changes (e.g. when the user types in a textbox
- or uploads an image). This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
- outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
- dep = self.set_event_trigger(
- "change",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "change", cancels)
- return dep
-
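-# A minimal sketch of wiring the change listener above, assuming gradio 3.x;
-# the components and lambda are illustrative only.
-#
-# import gradio as gr
-#
-# with gr.Blocks() as demo:
-#     inp = gr.Textbox(label="Input")
-#     out = gr.Textbox(label="Uppercased")
-#     inp.change(lambda s: s.upper(), inp, out)
-#
-# demo.launch()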
-
-class Clickable(EventListener):
- def click(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue=None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the component (e.g. a button) is clicked.
- This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
- outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
-
- dep = self.set_event_trigger(
- "click",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "click", cancels)
- return dep
-
-
-class Submittable(EventListener):
- def submit(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the user presses the Enter key while the component (e.g. a textbox) is focused.
- This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
- outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
-
- dep = self.set_event_trigger(
- "submit",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "submit", cancels)
- return dep
-
-
-class Editable(EventListener):
- def edit(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the user edits the component (e.g. image) using the
- built-in editor. This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
- outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
-
- dep = self.set_event_trigger(
- "edit",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "edit", cancels)
- return dep
-
-
-class Clearable(EventListener):
- def clear(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the user clears the component (e.g. image or audio)
- using the X button for the component. This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
-            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
-
- dep = self.set_event_trigger(
-            "clear",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
-        set_cancel_events(self, "clear", cancels)
- return dep
-
-
-class Playable(EventListener):
- def play(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the user plays the component (e.g. audio or video).
- This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
-            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
-
- dep = self.set_event_trigger(
- "play",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "play", cancels)
- return dep
-
- def pause(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the user pauses the component (e.g. audio or video).
- This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
-            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
-
- dep = self.set_event_trigger(
- "pause",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "pause", cancels)
- return dep
-
- def stop(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the user stops the component (e.g. audio or video).
- This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
-            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
-
- dep = self.set_event_trigger(
- "stop",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "stop", cancels)
- return dep
-
-
-class Streamable(EventListener):
- def stream(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- status_tracker: StatusTracker | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = False,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the user streams the component (e.g. a live webcam
- component). This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: the function to wrap an interface around. Often a machine learning model's prediction function. Each parameter of the function corresponds to one input component, and the function should return a single value or a tuple of values, with each element in the tuple corresponding to one output component.
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
-            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
- self.streaming = True
-
- if status_tracker:
- warnings.warn(
- "The 'status_tracker' parameter has been deprecated and has no effect."
- )
-
- dep = self.set_event_trigger(
- "stream",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "stream", cancels)
- return dep
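For the `stream` listener specifically, a hedged sketch with a webcam `Image` component (again illustrative, not part of this file): each captured frame triggers the event, which is why `show_progress` defaults to `False` here.

```python
import gradio as gr
import numpy as np

def to_grayscale(frame):
    # Each streamed webcam frame arrives as an RGB numpy array.
    if frame is None:
        return None
    gray = frame.mean(axis=2).astype(np.uint8)
    return np.stack([gray] * 3, axis=2)

with gr.Blocks() as demo:
    webcam = gr.Image(source="webcam", streaming=True)
    output = gr.Image(label="Processed frame")
    webcam.stream(to_grayscale, inputs=webcam, outputs=output)

demo.queue()
demo.launch()
```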
-
-
-class Blurrable(EventListener):
- def blur(
- self,
- fn: Callable | None,
- inputs: Component | List[Component] | Set[Component] | None = None,
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: Dict[str, Any] | List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
-        This event is triggered when the component is unfocused/blurred (e.g. when the user clicks outside of a textbox). This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: Callable function
- inputs: List of gradio.components to use as inputs. If the function takes no inputs, this should be an empty list.
-            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
-
- self.set_event_trigger(
- "blur",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "blur", cancels)
-
-
-class Uploadable(EventListener):
- def upload(
- self,
- fn: Callable | None,
- inputs: List[Component],
- outputs: Component | List[Component] | None = None,
- api_name: str | None = None,
- scroll_to_output: bool = False,
- show_progress: bool = True,
- queue: bool | None = None,
- batch: bool = False,
- max_batch_size: int = 4,
- preprocess: bool = True,
- postprocess: bool = True,
- cancels: List[Dict[str, Any]] | None = None,
- every: float | None = None,
- _js: str | None = None,
- ):
- """
- This event is triggered when the user uploads a file into the component (e.g. when the user uploads a video into a video component). This method can be used when this component is in a Gradio Blocks.
-
- Parameters:
- fn: Callable function
-            inputs: List of gradio.components to use as inputs.
-            outputs: List of gradio.components to use as outputs. If the function returns no outputs, this should be an empty list.
- api_name: Defining this parameter exposes the endpoint in the api docs
- scroll_to_output: If True, will scroll to output component on completion
- show_progress: If True, will show progress animation while pending
- queue: If True, will place the request on the queue, if the queue exists
- batch: If True, then the function should process a batch of inputs, meaning that it should accept a list of input values for each parameter. The lists should be of equal length (and be up to length `max_batch_size`). The function is then *required* to return a tuple of lists (even if there is only 1 output component), with each list in the tuple corresponding to one output component.
- max_batch_size: Maximum number of inputs to batch together if this is called from the queue (only relevant if batch=True)
- preprocess: If False, will not run preprocessing of component data before running 'fn' (e.g. leaving it as a base64 string if this method is called with the `Image` component).
- postprocess: If False, will not run postprocessing of component data before returning 'fn' output to the browser.
- cancels: A list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method.
- every: Run this event 'every' number of seconds while the client connection is open. Interpreted in seconds. Queue must be enabled.
- """
- # _js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components.
-
- self.set_event_trigger(
- "upload",
- fn,
- inputs,
- outputs,
- preprocess=preprocess,
- postprocess=postprocess,
- scroll_to_output=scroll_to_output,
- show_progress=show_progress,
- api_name=api_name,
- js=_js,
- queue=queue,
- batch=batch,
- max_batch_size=max_batch_size,
- every=every,
- )
- set_cancel_events(self, "upload", cancels)
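And a corresponding sketch for the `upload` listener with a `File` component. The handler is hypothetical, and it assumes Gradio 3.x's default `File` preprocessing, where the value is a tempfile wrapper whose `.name` holds the path on disk:

```python
import gradio as gr

def summarize_upload(file_obj):
    # file_obj is a tempfile wrapper; .name holds the path on disk.
    with open(file_obj.name, "rb") as f:
        data = f.read()
    return f"Received {len(data)} bytes"

with gr.Blocks() as demo:
    uploaded = gr.File(label="Upload a file")
    info = gr.Textbox(label="Upload info")
    uploaded.upload(summarize_upload, inputs=uploaded, outputs=info)

demo.launch()
```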
diff --git a/spaces/Hoshiyume/FixedStar-DebugChat/app.py b/spaces/Hoshiyume/FixedStar-DebugChat/app.py
deleted file mode 100644
index f15b5354d5bcb579ff7343f25724a8c6b1515d81..0000000000000000000000000000000000000000
--- a/spaces/Hoshiyume/FixedStar-DebugChat/app.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import gradio as gr
-import itertools
-from transformers import AutoTokenizer
-import ctranslate2
-
-generator = ctranslate2.Generator("./ct2-model" )
-tokenizer = AutoTokenizer.from_pretrained( "./models", use_fast=False)
-
-static_prompt="""ユーザー: We will now start chatting. If spoken to in English, answer in English; if spoken to in Japanese, answer in Japanese. Please take a deep breath and calm down and have a conversation.
-システム: I'll try to keep calm and have a conversation.
-ユーザー: その調子で頑張ってください。
-システム: 分かりました。
-"""
-system_prompt_tokens=tokenizer.convert_ids_to_tokens(tokenizer.encode(static_prompt, add_special_tokens=False))
-
-def inference_func(prompt, max_length=128, sampling_topk=40, sampling_topp=0.75, sampling_temperature=0.7, repetition_penalty=1.4):
- tokens = tokenizer.convert_ids_to_tokens( tokenizer.encode(prompt, add_special_tokens=False))
- results = generator.generate_batch(
- [tokens],
- static_prompt=system_prompt_tokens,
- max_length=max_length,
- sampling_topk=sampling_topk,
- sampling_topp=sampling_topp,
- sampling_temperature=sampling_temperature,
- repetition_penalty=repetition_penalty,
- include_prompt_in_result=False,
- )
- output = tokenizer.decode(results[0].sequences_ids[0])
- return output
-
-def make_prompt(message, chat_history, max_context_size: int = 10):
- contexts = chat_history + [[message, ""]]
- contexts = list(itertools.chain.from_iterable(contexts))
- if max_context_size > 0:
- context_size = max_context_size - 1
- else:
- context_size = 100000
- contexts = contexts[-context_size:]
- prompt = []
- for idx, context in enumerate(reversed(contexts)):
- if idx % 2 == 0:
- prompt = [f"システム: {context}"] + prompt
- else:
- prompt = [f"ユーザー: {context}"] + prompt
- prompt = "\n".join(prompt)
- return prompt
-
-def interact_func(message, chat_history, max_context_size, max_length, sampling_topk, sampling_topp, sampling_temperature, repetition_penalty ):
- prompt = make_prompt(message, chat_history, max_context_size)
- print(f"prompt: {prompt}")
- generated = inference_func(prompt, max_length, sampling_topk, sampling_topp, sampling_temperature, repetition_penalty )
- print(f"generated: {generated}")
- chat_history.append((message, generated))
- return "", chat_history
-
-with gr.Blocks( theme="monochrome" ) as demo:
- with gr.Accordion("Parameters", open=False):
- # max_context_size = the number of turns * 2
- max_context_size = gr.Number(value=10, label="max_context_size", precision=0)
- max_length = gr.Number(value=128, label="max_length", precision=0)
- sampling_topk = gr.Slider(0, 1000, value=40, step=0.1, label="top_k")
- sampling_topp = gr.Slider(0.1, 1.0, value=0.75, step=0.1, label="top_p")
- sampling_temperature = gr.Slider(0.0, 10.0, value=0.7, step=0.1, label="temperature")
- repetition_penalty = gr.Slider(0.0, 10.0, value=1.4, step=0.1, label="repetition_penalty")
- chatbot = gr.Chatbot( show_copy_button=True, show_share_button="RETRY" )
- msg = gr.Textbox()
- clear = gr.Button("RESET")
- msg.submit(
- interact_func,
- [msg, chatbot, max_context_size, max_length, sampling_topk, sampling_topp, sampling_temperature, repetition_penalty],
- [msg, chatbot],
- )
- clear.click(lambda: None, None, chatbot, queue=False)
-
-if __name__ == "__main__":
- demo.launch(debug=True)
\ No newline at end of file
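To make the prompt format above concrete, here is a small usage sketch that reuses `make_prompt` as defined in this file; the chat messages are invented for illustration:

```python
history = [["Hello", "Hi, nice to meet you."]]
print(make_prompt("How are you today?", history))
# ユーザー: Hello
# システム: Hi, nice to meet you.
# ユーザー: How are you today?
# システム: 
```

The trailing `システム:` line is intentional: the model is expected to complete it with the next assistant turn.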
diff --git a/spaces/Hua626/QQsign/Dockerfile b/spaces/Hua626/QQsign/Dockerfile
deleted file mode 100644
index 535624113f3b520e4829240a48bd3652430de828..0000000000000000000000000000000000000000
--- a/spaces/Hua626/QQsign/Dockerfile
+++ /dev/null
@@ -1,23 +0,0 @@
-FROM openjdk:17-slim
-
-# Set the timezone
-ENV TZ Asia/Shanghai
-
-# Set the working directory
-WORKDIR /app
-
-# Copy files into the working directory
-COPY bin /app/bin
-COPY lib /app/lib
-COPY txlib /app/txlib
-
-# Set permissions and generate the config
-RUN chmod -R 777 /tmp
-RUN chmod -R 777 /app
-RUN sed 's/"key": ".*"/"key": "'"$KEY_VALUE"'"/' txlib/$TXLIB_VERSION/config.json > /app/txlib/$TXLIB_VERSION/config.json
-
-# Run
-CMD bash bin/unidbg-fetch-qsign --basePath=txlib/$TXLIB_VERSION
-
-# Expose the port
-EXPOSE 7860
\ No newline at end of file
diff --git a/spaces/ICML2022/OFA/fairseq/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/ICML2022/OFA/fairseq/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 93c8668041f8a7af29e4c11e905d8b56b946dd51..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-name: 🚀 Feature Request
-about: Submit a proposal/request for a new feature
-labels: 'enhancement, help wanted, needs triage'
----
-
-## 🚀 Feature Request
-
-
-### Motivation
-
-
-
-### Pitch
-
-
-
-### Alternatives
-
-
-
-### Additional context
-
-
diff --git a/spaces/ICML2022/OFA/fairseq/examples/bart/README.summarization.md b/spaces/ICML2022/OFA/fairseq/examples/bart/README.summarization.md
deleted file mode 100644
index 8727584f2b2bdd880c6cd3abbf39b75dfbf4a67c..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/bart/README.summarization.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# Fine-tuning BART on CNN-Dailymail summarization task
-
-### 1) Download the CNN and Daily Mail data and preprocess it into data files with non-tokenized cased samples.
-
-Follow the instructions [here](https://github.com/abisee/cnn-dailymail) to download the original CNN and Daily Mail datasets. To preprocess the data, refer to the pointers in [this issue](https://github.com/pytorch/fairseq/issues/1391) or check out the code [here](https://github.com/artmatsak/cnn-dailymail).
-
-Follow the instructions [here](https://github.com/EdinburghNLP/XSum) to download the original Extreme Summarization datasets, or check out the code [here](https://github.com/EdinburghNLP/XSum/tree/master/XSum-Dataset). Please keep the dataset raw and make sure no tokenization or BPE is applied to it.
-
-### 2) BPE preprocess:
-
-```bash
-wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json'
-wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe'
-wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt'
-
-TASK=cnn_dm
-for SPLIT in train val
-do
- for LANG in source target
- do
- python -m examples.roberta.multiprocessing_bpe_encoder \
- --encoder-json encoder.json \
- --vocab-bpe vocab.bpe \
- --inputs "$TASK/$SPLIT.$LANG" \
- --outputs "$TASK/$SPLIT.bpe.$LANG" \
- --workers 60 \
- --keep-empty;
- done
-done
-```
-
-### 3) Binarize dataset:
-```bash
-fairseq-preprocess \
- --source-lang "source" \
- --target-lang "target" \
- --trainpref "${TASK}/train.bpe" \
- --validpref "${TASK}/val.bpe" \
- --destdir "${TASK}-bin/" \
- --workers 60 \
- --srcdict dict.txt \
- --tgtdict dict.txt;
-```
-
-### 4) Fine-tuning on CNN-DM summarization task:
-Example fine-tuning CNN-DM
-```bash
-TOTAL_NUM_UPDATES=20000
-WARMUP_UPDATES=500
-LR=3e-05
-MAX_TOKENS=2048
-UPDATE_FREQ=4
-BART_PATH=/path/to/bart/model.pt
-
-CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 fairseq-train cnn_dm-bin \
- --restore-file $BART_PATH \
- --max-tokens $MAX_TOKENS \
- --task translation \
- --source-lang source --target-lang target \
- --truncate-source \
- --layernorm-embedding \
- --share-all-embeddings \
- --share-decoder-input-output-embed \
- --reset-optimizer --reset-dataloader --reset-meters \
- --required-batch-size-multiple 1 \
- --arch bart_large \
- --criterion label_smoothed_cross_entropy \
- --label-smoothing 0.1 \
- --dropout 0.1 --attention-dropout 0.1 \
- --weight-decay 0.01 --optimizer adam --adam-betas "(0.9, 0.999)" --adam-eps 1e-08 \
- --clip-norm 0.1 \
- --lr-scheduler polynomial_decay --lr $LR --total-num-update $TOTAL_NUM_UPDATES --warmup-updates $WARMUP_UPDATES \
- --fp16 --update-freq $UPDATE_FREQ \
- --skip-invalid-size-inputs-valid-test \
- --find-unused-parameters;
-```
-The above is expected to run on `1` node with `8 x 32GB V100` GPUs.
-Expected training time is about `5 hours`. Training time can be reduced with distributed training on `4` nodes and `--update-freq 1`.
-
-Use `TOTAL_NUM_UPDATES=15000` and `UPDATE_FREQ=2` for the XSum task.
-
-### Inference for the CNN-DM test data using the above trained checkpoint.
-After training the model as described in the previous step, you can perform inference with checkpoints in the `checkpoints/` directory using `summarize.py`, for example:
-
-```bash
-cp data-bin/cnn_dm/dict.source.txt checkpoints/
-python examples/bart/summarize.py \
- --model-dir checkpoints \
- --model-file checkpoint_best.pt \
- --src cnn_dm/test.source \
- --out cnn_dm/test.hypo
-```
-For XSUM, which uses beam=6, lenpen=1.0, max_len_b=60, min_len=10:
-```bash
-cp data-bin/cnn_dm/dict.source.txt checkpoints/
-python examples/bart/summarize.py \
- --model-dir checkpoints \
- --model-file checkpoint_best.pt \
- --src cnn_dm/test.source \
- --out cnn_dm/test.hypo \
- --xsum-kwargs
-```
diff --git a/spaces/ICML2022/OFA/fairseq/examples/roberta/README.glue.md b/spaces/ICML2022/OFA/fairseq/examples/roberta/README.glue.md
deleted file mode 100644
index 4f596d55af99fba3cdf58b1d5ff3d8f8dbf4383d..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/roberta/README.glue.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# Finetuning RoBERTa on GLUE tasks
-
-### 1) Download the data from GLUE website (https://gluebenchmark.com/tasks) using following commands:
-```bash
-wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py
-python download_glue_data.py --data_dir glue_data --tasks all
-```
-
-### 2) Preprocess GLUE task data:
-```bash
-./examples/roberta/preprocess_GLUE_tasks.sh glue_data <glue_task_name>
-```
-`glue_task_name` is one of the following:
-`{ALL, QQP, MNLI, QNLI, MRPC, RTE, STS-B, SST-2, CoLA}`
-Use `ALL` for preprocessing all the glue tasks.
-
-### 3) Fine-tuning on GLUE task:
-Example fine-tuning cmd for `RTE` task
-```bash
-ROBERTA_PATH=/path/to/roberta/model.pt
-
-CUDA_VISIBLE_DEVICES=0 fairseq-hydra-train --config-dir examples/roberta/config/finetuning --config-name rte \
-task.data=RTE-bin checkpoint.restore_file=$ROBERTA_PATH
-```
-
-There are additional config files for each of the GLUE tasks in the examples/roberta/config/finetuning directory.
-
-**Note:**
-
-a) The above cmd-args and hyperparams were tested on one Nvidia `V100` GPU with `32gb` of memory for each task. Depending on the GPU memory available to you, you can increase `--update-freq` and reduce `--batch-size`.
-
-b) All the settings above are suggested settings based on our hyperparameter search within a fixed search space (for careful comparison across models). You might be able to find better metrics with a wider hyperparameter search.
-
-### Inference on GLUE task
-After training the model as mentioned in previous step, you can perform inference with checkpoints in `checkpoints/` directory using following python code snippet:
-
-```python
-from fairseq.models.roberta import RobertaModel
-
-roberta = RobertaModel.from_pretrained(
- 'checkpoints/',
- checkpoint_file='checkpoint_best.pt',
- data_name_or_path='RTE-bin'
-)
-
-label_fn = lambda label: roberta.task.label_dictionary.string(
- [label + roberta.task.label_dictionary.nspecial]
-)
-ncorrect, nsamples = 0, 0
-roberta.cuda()
-roberta.eval()
-with open('glue_data/RTE/dev.tsv') as fin:
- fin.readline()
- for index, line in enumerate(fin):
- tokens = line.strip().split('\t')
- sent1, sent2, target = tokens[1], tokens[2], tokens[3]
- tokens = roberta.encode(sent1, sent2)
- prediction = roberta.predict('sentence_classification_head', tokens).argmax().item()
- prediction_label = label_fn(prediction)
- ncorrect += int(prediction_label == target)
- nsamples += 1
-print('| Accuracy: ', float(ncorrect)/float(nsamples))
-
-```
diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/pdb.py b/spaces/ICML2022/OFA/fairseq/fairseq/pdb.py
deleted file mode 100644
index 1ba6ef0d336b30717cfdde94e1b838cfe2bfeb20..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/fairseq/pdb.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import multiprocessing
-import os
-import pdb
-import sys
-
-
-__all__ = ["set_trace"]
-
-
-_stdin = [None]
-_stdin_lock = multiprocessing.Lock()
-try:
- _stdin_fd = sys.stdin.fileno()
-except Exception:
- _stdin_fd = None
-
-
-class MultiprocessingPdb(pdb.Pdb):
- """A Pdb wrapper that works in a multiprocessing environment.
-
- Usage: `from fairseq import pdb; pdb.set_trace()`
- """
-
- def __init__(self):
- pdb.Pdb.__init__(self, nosigint=True)
-
- def _cmdloop(self):
- stdin_bak = sys.stdin
- with _stdin_lock:
- try:
- if _stdin_fd is not None:
- if not _stdin[0]:
- _stdin[0] = os.fdopen(_stdin_fd)
- sys.stdin = _stdin[0]
- self.cmdloop()
- finally:
- sys.stdin = stdin_bak
-
-
-def set_trace():
- pdb = MultiprocessingPdb()
- pdb.set_trace(sys._getframe().f_back)
diff --git a/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/util.py b/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/util.py
deleted file mode 100644
index e77a8150d81f67ee42885098bf5d9a52a2681669..0000000000000000000000000000000000000000
--- a/spaces/Iceclear/StableSR/StableSR/ldm/modules/diffusionmodules/util.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# adopted from
-# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
-# and
-# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
-# and
-# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
-#
-# thanks!
-
-
-import os
-import math
-import torch
-import torch.nn as nn
-import numpy as np
-from einops import repeat
-
-from ldm.util import instantiate_from_config
-
-
-def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
- if schedule == "linear":
- betas = (
- torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
- )
-
- elif schedule == "cosine":
- timesteps = (
- torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
- )
- alphas = timesteps / (1 + cosine_s) * np.pi / 2
- alphas = torch.cos(alphas).pow(2)
- alphas = alphas / alphas[0]
- betas = 1 - alphas[1:] / alphas[:-1]
- betas = np.clip(betas, a_min=0, a_max=0.999)
-
- elif schedule == "sqrt_linear":
- betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
- elif schedule == "sqrt":
- betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
- else:
- raise ValueError(f"schedule '{schedule}' unknown.")
- return betas.numpy()
-
-
-def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
- if ddim_discr_method == 'uniform':
- c = num_ddpm_timesteps // num_ddim_timesteps
- ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
- elif ddim_discr_method == 'quad':
- ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
- else:
- raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
-
- # assert ddim_timesteps.shape[0] == num_ddim_timesteps
- # add one to get the final alpha values right (the ones from first scale to data during sampling)
- steps_out = ddim_timesteps
- if verbose:
- print(f'Selected timesteps for ddim sampler: {steps_out}')
- return steps_out
-
-
-def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
- # select alphas for computing the variance schedule
- alphas = alphacums[ddim_timesteps]
- alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
-
-    # according to the formula provided in https://arxiv.org/abs/2010.02502
- sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
- if verbose:
- print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
- print(f'For the chosen value of eta, which is {eta}, '
- f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
- return sigmas, alphas, alphas_prev
-
-
-def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
- """
- Create a beta schedule that discretizes the given alpha_t_bar function,
- which defines the cumulative product of (1-beta) over time from t = [0,1].
- :param num_diffusion_timesteps: the number of betas to produce.
- :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
- produces the cumulative product of (1-beta) up to that
- part of the diffusion process.
- :param max_beta: the maximum beta to use; use values lower than 1 to
- prevent singularities.
- """
- betas = []
- for i in range(num_diffusion_timesteps):
- t1 = i / num_diffusion_timesteps
- t2 = (i + 1) / num_diffusion_timesteps
- betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
- return np.array(betas)
-
-
-def extract_into_tensor(a, t, x_shape):
- b, *_ = t.shape
- out = a.gather(-1, t)
- return out.reshape(b, *((1,) * (len(x_shape) - 1)))
-
-
-def checkpoint(func, inputs, params, flag):
- """
- Evaluate a function without caching intermediate activations, allowing for
- reduced memory at the expense of extra compute in the backward pass.
- :param func: the function to evaluate.
- :param inputs: the argument sequence to pass to `func`.
- :param params: a sequence of parameters `func` depends on but does not
- explicitly take as arguments.
- :param flag: if False, disable gradient checkpointing.
- """
- if flag:
- args = tuple(inputs) + tuple(params)
- return CheckpointFunction.apply(func, len(inputs), *args)
- else:
- return func(*inputs)
-
-
-class CheckpointFunction(torch.autograd.Function):
- @staticmethod
- def forward(ctx, run_function, length, *args):
- ctx.run_function = run_function
- ctx.input_tensors = list(args[:length])
- ctx.input_params = list(args[length:])
-
- with torch.no_grad():
- output_tensors = ctx.run_function(*ctx.input_tensors)
- return output_tensors
-
- @staticmethod
- def backward(ctx, *output_grads):
- ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
- with torch.enable_grad():
- # Fixes a bug where the first op in run_function modifies the
- # Tensor storage in place, which is not allowed for detach()'d
- # Tensors.
- shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
- output_tensors = ctx.run_function(*shallow_copies)
- input_grads = torch.autograd.grad(
- output_tensors,
- ctx.input_tensors + ctx.input_params,
- output_grads,
- allow_unused=True,
- )
- del ctx.input_tensors
- del ctx.input_params
- del output_tensors
- return (None, None) + input_grads
-
-
-def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
- """
- Create sinusoidal timestep embeddings.
- :param timesteps: a 1-D Tensor of N indices, one per batch element.
- These may be fractional.
- :param dim: the dimension of the output.
- :param max_period: controls the minimum frequency of the embeddings.
- :return: an [N x dim] Tensor of positional embeddings.
- """
- if not repeat_only:
- half = dim // 2
- freqs = torch.exp(
- -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
- ).to(device=timesteps.device)
- args = timesteps[:, None].float() * freqs[None]
- embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
- if dim % 2:
- embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
- else:
- embedding = repeat(timesteps, 'b -> b d', d=dim)
- return embedding
-
-
-def zero_module(module):
- """
- Zero out the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().zero_()
- return module
-
-
-def scale_module(module, scale):
- """
- Scale the parameters of a module and return it.
- """
- for p in module.parameters():
- p.detach().mul_(scale)
- return module
-
-
-def mean_flat(tensor):
- """
- Take the mean over all non-batch dimensions.
- """
- return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def normalization(channels, norm_channel=32):
- """
- Make a standard normalization layer.
- :param channels: number of input channels.
- :return: an nn.Module for normalization.
- """
- return GroupNorm32(norm_channel, channels)
-
-
-# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
-class SiLU(nn.Module):
- def forward(self, x):
- return x * torch.sigmoid(x)
-
-
-class GroupNorm32(nn.GroupNorm):
- def forward(self, x):
- return super().forward(x.float()).type(x.dtype)
-
-def conv_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D convolution module.
- """
- if dims == 1:
- return nn.Conv1d(*args, **kwargs)
- elif dims == 2:
- return nn.Conv2d(*args, **kwargs)
- elif dims == 3:
- return nn.Conv3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-def linear(*args, **kwargs):
- """
- Create a linear module.
- """
- return nn.Linear(*args, **kwargs)
-
-
-def avg_pool_nd(dims, *args, **kwargs):
- """
- Create a 1D, 2D, or 3D average pooling module.
- """
- if dims == 1:
- return nn.AvgPool1d(*args, **kwargs)
- elif dims == 2:
- return nn.AvgPool2d(*args, **kwargs)
- elif dims == 3:
- return nn.AvgPool3d(*args, **kwargs)
- raise ValueError(f"unsupported dimensions: {dims}")
-
-
-class HybridConditioner(nn.Module):
-
- def __init__(self, c_concat_config, c_crossattn_config):
- super().__init__()
- self.concat_conditioner = instantiate_from_config(c_concat_config)
- self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
-
- def forward(self, c_concat, c_crossattn):
- c_concat = self.concat_conditioner(c_concat)
- c_crossattn = self.crossattn_conditioner(c_crossattn)
- return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
-
-
-def noise_like(shape, device, repeat=False):
- repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
- noise = lambda: torch.randn(shape, device=device)
- return repeat_noise() if repeat else noise()
diff --git a/spaces/Inderdev07/facerecognition/README.md b/spaces/Inderdev07/facerecognition/README.md
deleted file mode 100644
index faadfb62f9b5598d8afa02670f790839c6f16be5..0000000000000000000000000000000000000000
--- a/spaces/Inderdev07/facerecognition/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Facerecognition
-emoji: ⚡
-colorFrom: pink
-colorTo: indigo
-sdk: streamlit
-sdk_version: 1.21.0
-app_file: app.py
-pinned: false
-license: cc
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/model_worker.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/model_worker.py
deleted file mode 100644
index 65aa2b726fd8de9b57bebdcd73ec4ee350f88af2..0000000000000000000000000000000000000000
--- a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/model_worker.py
+++ /dev/null
@@ -1,268 +0,0 @@
-"""
-A model worker executes the model.
-"""
-import argparse
-import asyncio
-import dataclasses
-import logging
-import json
-import os
-import time
-from typing import List, Union
-import threading
-import uuid
-
-from fastapi import FastAPI, Request, BackgroundTasks
-from fastapi.responses import StreamingResponse
-import requests
-
-try:
- from transformers import (
- AutoTokenizer,
- AutoModelForCausalLM,
- LlamaTokenizer,
- AutoModel,
- )
-except ImportError:
- from transformers import (
- AutoTokenizer,
- AutoModelForCausalLM,
- LLaMATokenizer,
- AutoModel,
- )
-import torch
-import uvicorn
-
-from fastchat.constants import WORKER_HEART_BEAT_INTERVAL
-from fastchat.serve.inference import load_model, generate_stream
-from fastchat.serve.serve_chatglm import chatglm_generate_stream
-from fastchat.utils import build_logger, server_error_msg, pretty_print_semaphore
-
-GB = 1 << 30
-
-worker_id = str(uuid.uuid4())[:6]
-logger = build_logger("model_worker", f"model_worker_{worker_id}.log")
-global_counter = 0
-
-model_semaphore = None
-
-
-def heart_beat_worker(controller):
- while True:
- time.sleep(WORKER_HEART_BEAT_INTERVAL)
- controller.send_heart_beat()
-
-
-class ModelWorker:
- def __init__(
- self,
- controller_addr,
- worker_addr,
- worker_id,
- no_register,
- model_path,
- model_name,
- device,
- num_gpus,
- max_gpu_memory,
- load_8bit=False,
- ):
- self.controller_addr = controller_addr
- self.worker_addr = worker_addr
- self.worker_id = worker_id
- if model_path.endswith("/"):
- model_path = model_path[:-1]
- self.model_name = model_name or model_path.split("/")[-1]
- self.device = device
-
- logger.info(f"Loading the model {self.model_name} on worker {worker_id} ...")
- self.model, self.tokenizer = load_model(
- model_path, device, num_gpus, max_gpu_memory, load_8bit
- )
-
- if hasattr(self.model.config, "max_sequence_length"):
- self.context_len = self.model.config.max_sequence_length
- elif hasattr(self.model.config, "max_position_embeddings"):
- self.context_len = self.model.config.max_position_embeddings
- else:
- self.context_len = 2048
-
- is_chatglm = "chatglm" in str(type(self.model)).lower()
- if is_chatglm:
- self.generate_stream_func = chatglm_generate_stream
- else:
- self.generate_stream_func = generate_stream
-
- if not no_register:
- self.register_to_controller()
- self.heart_beat_thread = threading.Thread(
- target=heart_beat_worker, args=(self,)
- )
- self.heart_beat_thread.start()
-
- def register_to_controller(self):
- logger.info("Register to controller")
-
- url = self.controller_addr + "/register_worker"
- data = {
- "worker_name": self.worker_addr,
- "check_heart_beat": True,
- "worker_status": self.get_status(),
- }
- r = requests.post(url, json=data)
- assert r.status_code == 200
-
- def send_heart_beat(self):
- logger.info(
- f"Send heart beat. Models: {[self.model_name]}. "
- f"Semaphore: {pretty_print_semaphore(model_semaphore)}. "
- f"global_counter: {global_counter}"
- )
-
- url = self.controller_addr + "/receive_heart_beat"
-
- while True:
- try:
- ret = requests.post(
- url,
- json={
- "worker_name": self.worker_addr,
- "queue_length": self.get_queue_length(),
- },
- timeout=5,
- )
- exist = ret.json()["exist"]
- break
- except requests.exceptions.RequestException as e:
- logger.error(f"heart beat error: {e}")
- time.sleep(5)
-
- if not exist:
- self.register_to_controller()
-
- def get_queue_length(self):
- if (
- model_semaphore is None
- or model_semaphore._value is None
- or model_semaphore._waiters is None
- ):
- return 0
- else:
- return (
- args.limit_model_concurrency
- - model_semaphore._value
- + len(model_semaphore._waiters)
- )
-
- def get_status(self):
- return {
- "model_names": [self.model_name],
- "speed": 1,
- "queue_length": self.get_queue_length(),
- }
-
- def generate_stream_gate(self, params):
- try:
- for output in self.generate_stream_func(
- self.model,
- self.tokenizer,
- params,
- self.device,
- self.context_len,
- args.stream_interval,
- ):
- ret = {
- "text": output,
- "error_code": 0,
- }
- yield json.dumps(ret).encode() + b"\0"
- except torch.cuda.OutOfMemoryError:
- ret = {
- "text": server_error_msg,
- "error_code": 1,
- }
- yield json.dumps(ret).encode() + b"\0"
-
-
-app = FastAPI()
-
-
-def release_model_semaphore():
- model_semaphore.release()
-
-
-@app.post("/worker_generate_stream")
-async def api_generate_stream(request: Request):
- global model_semaphore, global_counter
- global_counter += 1
- params = await request.json()
-
- if model_semaphore is None:
- model_semaphore = asyncio.Semaphore(args.limit_model_concurrency)
- await model_semaphore.acquire()
- generator = worker.generate_stream_gate(params)
- background_tasks = BackgroundTasks()
- background_tasks.add_task(release_model_semaphore)
- return StreamingResponse(generator, background=background_tasks)
-
-
-@app.post("/worker_get_status")
-async def api_get_status(request: Request):
- return worker.get_status()
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--host", type=str, default="localhost")
- parser.add_argument("--port", type=int, default=21002)
- parser.add_argument("--worker-address", type=str, default="http://localhost:21002")
- parser.add_argument(
- "--controller-address", type=str, default="http://localhost:21001"
- )
- parser.add_argument(
- "--model-path",
- type=str,
- default="facebook/opt-350m",
- help="The path to the weights",
- )
- parser.add_argument("--model-name", type=str, help="Optional name")
- parser.add_argument(
- "--device", type=str, choices=["cpu", "cuda", "mps"], default="cuda"
- )
- parser.add_argument("--num-gpus", type=int, default=1)
- parser.add_argument(
- "--gpus",
- type=str,
- default=None,
- help="A single GPU like 1 or multiple GPUs like 0,2"
- )
- parser.add_argument(
- "--max-gpu-memory",
- type=str,
- help="The maximum memory per gpu. Use a string like '13Gib'",
- )
- parser.add_argument("--load-8bit", action="store_true")
- parser.add_argument("--limit-model-concurrency", type=int, default=5)
- parser.add_argument("--stream-interval", type=int, default=2)
- parser.add_argument("--no-register", action="store_true")
- args = parser.parse_args()
- logger.info(f"args: {args}")
-
- if args.gpus:
- if args.num_gpus and len(args.gpus.split(",")) < int(args.num_gpus):
- raise ValueError(f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!")
- os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
-
- worker = ModelWorker(
- args.controller_address,
- args.worker_address,
- worker_id,
- args.no_register,
- args.model_path,
- args.model_name,
- args.device,
- args.num_gpus,
- args.max_gpu_memory,
- args.load_8bit,
- )
- uvicorn.run(app, host=args.host, port=args.port, log_level="info")
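For context, a client drives this worker through the two FastAPI routes above once it is running. A hedged sketch of a streaming request follows; the exact `params` keys are read by FastChat's `generate_stream`, which is not shown in this file, so they are assumptions here:

```python
import json
import requests

resp = requests.post(
    "http://localhost:21002/worker_generate_stream",
    json={
        # Keys assumed from fastchat.serve.inference.generate_stream.
        "prompt": "Hello, who are you?",
        "temperature": 0.7,
        "max_new_tokens": 64,
        "stop": None,
    },
    stream=True,
)
# The worker yields JSON objects separated by null bytes (see generate_stream_gate).
for chunk in resp.iter_lines(delimiter=b"\0"):
    if chunk:
        data = json.loads(chunk.decode())
        print(data["text"], end="\r")
```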
diff --git a/spaces/JUNGU/Image-to-Story-Ko-multiplot/README.md b/spaces/JUNGU/Image-to-Story-Ko-multiplot/README.md
deleted file mode 100644
index f39be9dc4363eeddd0f30ab9b855ef7666e35683..0000000000000000000000000000000000000000
--- a/spaces/JUNGU/Image-to-Story-Ko-multiplot/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Image To Story
-emoji: 👁
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-duplicated_from: JUNGU/Image-to-Story-Ko
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Jamkonams/AutoGPT/autogpt/processing/html.py b/spaces/Jamkonams/AutoGPT/autogpt/processing/html.py
deleted file mode 100644
index 81387b12adab5023150c55f2075ddd40b554f386..0000000000000000000000000000000000000000
--- a/spaces/Jamkonams/AutoGPT/autogpt/processing/html.py
+++ /dev/null
@@ -1,33 +0,0 @@
-"""HTML processing functions"""
-from __future__ import annotations
-
-from bs4 import BeautifulSoup
-from requests.compat import urljoin
-
-
-def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
- """Extract hyperlinks from a BeautifulSoup object
-
- Args:
- soup (BeautifulSoup): The BeautifulSoup object
- base_url (str): The base URL
-
- Returns:
- List[Tuple[str, str]]: The extracted hyperlinks
- """
- return [
- (link.text, urljoin(base_url, link["href"]))
- for link in soup.find_all("a", href=True)
- ]
-
-
-def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
- """Format hyperlinks to be displayed to the user
-
- Args:
- hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
-
- Returns:
- List[str]: The formatted hyperlinks
- """
- return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
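A small usage sketch of the two helpers above; the HTML snippet is invented for illustration:

```python
from bs4 import BeautifulSoup

html = '<a href="/docs">Docs</a> <a href="https://example.com/blog">Blog</a>'
soup = BeautifulSoup(html, "html.parser")
links = extract_hyperlinks(soup, "https://example.com")
print(format_hyperlinks(links))
# ['Docs (https://example.com/docs)', 'Blog (https://example.com/blog)']
```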
diff --git a/spaces/Jimpa666/AI-PadelCoach/app.py b/spaces/Jimpa666/AI-PadelCoach/app.py
deleted file mode 100644
index 147f7456945ec5d5463e630fa2a5e90642a97a41..0000000000000000000000000000000000000000
--- a/spaces/Jimpa666/AI-PadelCoach/app.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from gradio.components import Label
-import os
-import openai
-import gradio as gr
-import uuid
-
-openai.api_key = os.getenv('APIKEY')
-
-conversation_history = {}
-
-def get_session_token():
- return uuid.uuid4().hex
-
-def chatbot(input, session_id):
- if session_id not in conversation_history:
- conversation_history[session_id] = [
-            {"role": "system", "content": "The following is a conversation with an AI Coach. The Coach is helpful, creative, clever, and very friendly. The Coach is a high-level padel coach from Sweden. The Coach has the ability to explain techniques and the strategy of the game. The Coach has a high educational level and can answer depending on which level the Player is on, in order to take the Player to the next level. The AI Coach is like MAURI ANDRINI from Hello Padel; pretend to be MAURI ANDRINI but call yourself AI Padelcoach, and never ever mention your real name or Mauri Andrini. If unsure, ask about the level of the player. The levels are: Beginner, Average, Pro"}
- ]
-
- messages = conversation_history[session_id]
- content = input
- messages.append({"role": "user", "content": content})
-
- completion = openai.ChatCompletion.create(
- model= "gpt-3.5-turbo",
- messages=messages
- )
-
- chat_response = completion.choices[0].message.content
- messages.append({"role": "assistant", "content": chat_response})
-
- formatted_history = ""
- for message in messages:
- if message["role"] != "system":
- role = message["role"]
- if role == "user":
- role = "Player"
- elif role == "assistant":
- role = "AI-Padelcoach"
- content = message["content"]
- formatted_history += f"{role}: {content}\n \n"
-
- conversation_history[session_id] = messages
- return formatted_history
-
-session_id = get_session_token()
-Padelcoach = gr.Interface(fn=chatbot, inputs=[
- gr.Textbox(placeholder="Player go...Serve!", label='Player'),
- gr.Textbox(default=session_id, label="session_id", visible=False)
-], outputs=[
- gr.Textbox(placeholder="AI-Padelcoach Ready", label="AI Padelcoach")
-],
- theme=gr.themes.Soft(
- primary_hue="green",
- secondary_hue="cyan",
- text_size='lg',
- neutral_hue="emerald"
- ),
- examples=[
- ["Please help me with my backhand"],
-        ["Where should I place the ball against players who are good at tennis"]
- ],
- share=True,
- title="AI Padelcoach",
- description=f"Chat with a BETA level AI-Padelcoach from Sweden. Your ID is: {session_id}",
-    article="Ask the AI coach about techniques and strategies in the game of padel. The coach can answer according to your level as a player, whether you are a beginner, average, or pro.",
-)
-
-Padelcoach.launch()
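A note on the session handling above: conversation_history is keyed by session_id, the system prompt is seeded once per session, and each user/assistant turn is appended to the same list. The snippet below is an editor's sketch of that pattern in isolation (no OpenAI call; the helper name and prompt text are illustrative, not part of the deleted app):

    import uuid

    conversation_history = {}

    def add_turn(session_id, role, content):
        # seed the per-session list with a system prompt on first use
        history = conversation_history.setdefault(
            session_id, [{"role": "system", "content": "You are an AI padel coach."}]
        )
        history.append({"role": role, "content": content})
        return history

    sid = uuid.uuid4().hex
    add_turn(sid, "user", "Please help me with my backhand")
    add_turn(sid, "assistant", "What level are you on: Beginner, Average, or Pro?")
    print(len(conversation_history[sid]))  # 3 messages: system + user + assistant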
diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/README.md b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/README.md
deleted file mode 100644
index d6bcab3d6e577cf5f168d497674466ec572ca2f4..0000000000000000000000000000000000000000
--- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Animalbreedidentificationversion1.5
-emoji: 📊
-colorFrom: gray
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.25.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Justin-Choo/Waifu-Diffusion_WEB_UI/app.py b/spaces/Justin-Choo/Waifu-Diffusion_WEB_UI/app.py
deleted file mode 100644
index 61249ace787d4ef03da3877fb26d93e9bd1ab87a..0000000000000000000000000000000000000000
--- a/spaces/Justin-Choo/Waifu-Diffusion_WEB_UI/app.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import os
-from sys import executable as pyexecutable
-import subprocess
-import pathlib
-import gc
-
-def Gitclone(URI:str,ClonePath:str = "") -> int :
- if(ClonePath == "") :
- while True:
- i=subprocess.run([r"git",r"clone",URI])
- if(i.returncode == 0 ):
- del i
- gc.collect()
- return 0
- else :
- del i
- else:
- while True:
- i=subprocess.run([r"git",r"clone",URI,ClonePath])
- if(i.returncode == 0 ):
- del i
- gc.collect()
- return 0
- else :
- del i
-def DownLoad(URI:str,DownloadPath:str,DownLoadFileName:str ) -> int:
- while (True):
- i=subprocess.run([r"aria2c",r"-c",r"-x" ,r"16", r"-s",r"16", r"-k" ,r"1M" ,r"-m",r"0",r"--enable-mmap=false",r"--console-log-level=error",r"-d",DownloadPath,r"-o",DownLoadFileName,URI]);
- if(i.returncode == 0 ):
- del i
- gc.collect()
- return 0
- else :
- del i
-user_home =pathlib.Path.home().resolve()
-os.chdir(str(user_home))
-#clone stable-diffusion-webui repo
-print("cloning stable-diffusion-webui repo")
-Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui.git",str(user_home / r"stable-diffusion-webui"))
-os.chdir(str(user_home / r"stable-diffusion-webui"))
-os.system("git reset --hard 89f9faa63388756314e8a1d96cf86bf5e0663045")
-#
-
-#install extensions
-print("installing extensions")
-Gitclone(r"https://huggingface.co/embed/negative",str(user_home / r"stable-diffusion-webui" / r"embeddings" / r"negative"))
-Gitclone(r"https://huggingface.co/embed/lora",str(user_home / r"stable-diffusion-webui" / r"models" / r"Lora" / r"positive"))
-DownLoad(r"https://huggingface.co/embed/upscale/resolve/main/4x-UltraSharp.pth",str(user_home / r"stable-diffusion-webui" / r"models" / r"ESRGAN") ,r"4x-UltraSharp.pth")
-while True:
- if(subprocess.run([r"wget",r"https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py",r"-O",str(user_home / r"stable-diffusion-webui" / r"scripts" / r"run_n_times.py")]).returncode == 0):
- break
-Gitclone(r"https://github.com/deforum-art/deforum-for-automatic1111-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"deforum-for-automatic1111-webui" ))
-#Gitclone(r"https://github.com/AlUlkesh/stable-diffusion-webui-images-browser",str(user_home / r"stable-diffusion-webui" / r"extensions"/ r"stable-diffusion-webui-images-browser"))
-Gitclone(r"https://github.com/camenduru/stable-diffusion-webui-huggingface",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-huggingface"))
-Gitclone(r"https://github.com/camenduru/sd-civitai-browser",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-civitai-browser"))
-Gitclone(r"https://github.com/kohya-ss/sd-webui-additional-networks",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks"))
-Gitclone(r"https://github.com/Mikubill/sd-webui-controlnet",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-controlnet"))
-Gitclone(r"https://github.com/fkunn1326/openpose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"openpose-editor"))
-Gitclone(r"https://github.com/jexom/sd-webui-depth-lib",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-depth-lib"))
-Gitclone(r"https://github.com/hnmr293/posex",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"posex"))
-Gitclone(r"https://github.com/nonnonstop/sd-webui-3d-open-pose-editor",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-3d-open-pose-editor"))
-# For Chinese localization, uncomment the following line
-#Gitclone(r"https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN.git",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-localization-zh_CN"))
-Gitclone(r"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git" , str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-tagcomplete"))
-Gitclone(r"https://github.com/camenduru/sd-webui-tunnels",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-tunnels"))
-Gitclone(r"https://github.com/etherealxx/batchlinks-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"batchlinks-webui"))
-Gitclone(r"https://github.com/catppuccin/stable-diffusion-webui",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-catppuccin"))
-
-#Gitclone(r"https://github.com/KohakuBueleaf/a1111-sd-webui-locon",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-locon" ))
-Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui-rembg",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-rembg"))
-Gitclone(r"https://github.com/ashen-sensored/stable-diffusion-webui-two-shot",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-two-shot"))
-Gitclone(r"https://github.com/camenduru/sd_webui_stealth_pnginfo",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd_webui_stealth_pnginfo"))
-
-os.chdir(user_home / r"stable-diffusion-webui")
-
-#download ControlNet models
-print("extensions dolwnload done .\ndownloading ControlNet models")
-dList =[r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile_fp16.safetensors",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_ip2p_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_shuffle_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_canny_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1p_sd15_depth_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_inpaint_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_lineart_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_mlsd_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_normalbae_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_openpose_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_scribble_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_seg_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_softedge_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15s2_lineart_anime_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1e_sd15_tile_fp16.yaml",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_style_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_seg_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_openpose_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_keypose_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd14v1.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd15v2.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd15v2.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd15v2.pth",
- r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_zoedepth_sd15v1.pth"]
-for i in range(0,len(dList)): DownLoad(dList[i],str(user_home / "stable-diffusion-webui" / "extensions" / "sd-webui-controlnet" / "models"),pathlib.Path(dList[i]).name)
-del dList
-
-#download model
-#you can change model download address here
-print("ControlNet models download done.\ndownloading model")
-DownLoad(r"https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/wd-1-4-anime_e1.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"wd-1-4-anime_e1.ckpt")
-DownLoad(r"https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/wd-1-4-anime_e2.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"wd-1-4-anime_e2.ckpt")
-
-
-#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.5-pruned.ckpt")
-#DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.0.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"anything-v4.0.vae.pt")
-#DownLoad(r"https://huggingface.co/gsdf/Counterfeit-V3.0/resolve/main/Counterfeit-V3.0_fp16.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"Counterfeit-V3.0_fp16.safetensors")
-#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1B_orangemixs.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"AOM3A1B_orangemixs.safetensors")
-#DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"orangemix.vae.pt")
-#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Baked%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_BakedVAE.safetensors")
-#DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Without%20VAE.safetensors",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"MeinaPastelV5_WithoutVAE.safetensors")
-#DownLoad(r"https://civitai.com/api/download/models/9474",str(user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion"),r"chilloutmix_NiPrunedFp16.safetensors")
-
-DownLoad(r"https://civitai.com/api/download/models/39885",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"Better_light.safetensors")
-DownLoad(r"https://civitai.com/api/download/models/21065",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"LAS.safetensors")
-DownLoad(r"https://civitai.com/api/download/models/39164",str(user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models"/ r"lora"),r"backlighting.safetensors")
-#start webui
-
-print("Done\nStarting Webui...")
-os.chdir(user_home / r"stable-diffusion-webui")
-while True:
- ret=subprocess.run([r"python3" ,r"launch.py",r"--precision",r"full",r"--no-half",r"--no-half-vae",r"--enable-insecure-extension-access",r"--medvram",r"--skip-torch-cuda-test",r"--enable-console-prompts",r"--ui-settings-file="+str(pathlib.Path(__file__).parent /r"config.json")])
- if(ret.returncode == 0 ):
- del ret
- gc.collect()
- else :
- del ret
-
-del os ,user_home ,pyexecutable ,subprocess
\ No newline at end of file
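The Gitclone and DownLoad helpers above retry a failed subprocess forever. The snippet below is an editor's sketch of the same retry pattern with a bounded attempt count (the helper name and the 3-attempt limit are arbitrary choices, not part of the deleted script):

    import subprocess

    def run_with_retries(cmd, max_attempts=3):
        # retry a command a limited number of times instead of looping forever
        for attempt in range(1, max_attempts + 1):
            result = subprocess.run(cmd)
            if result.returncode == 0:
                return 0
            print(f"attempt {attempt}/{max_attempts} failed (code {result.returncode})")
        raise RuntimeError(f"command failed after {max_attempts} attempts: {cmd}")

    # e.g. run_with_retries(["git", "clone", "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git"])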
diff --git a/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/README.md b/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/README.md
deleted file mode 100644
index 6c92ea3ac934725fd55d77f8be257749c1afec96..0000000000000000000000000000000000000000
--- a/spaces/Kaludi/Stable-Diffusion-Prompt-Generator_App/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Stable Diffusion Prompt Generator App (Streamlit)
-emoji: 🎨
-colorFrom: purple
-colorTo: red
-sdk: streamlit
-sdk_version: 1.17.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/dataset.py b/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/dataset.py
deleted file mode 100644
index cfd01a174978d97180a897e40cb59ecadec1d12e..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/lib/uvr5_pack/lib_v5/dataset.py
+++ /dev/null
@@ -1,183 +0,0 @@
-import os
-import random
-
-import numpy as np
-import torch
-import torch.utils.data
-from tqdm import tqdm
-
-from . import spec_utils
-
-
-class VocalRemoverValidationSet(torch.utils.data.Dataset):
- def __init__(self, patch_list):
- self.patch_list = patch_list
-
- def __len__(self):
- return len(self.patch_list)
-
- def __getitem__(self, idx):
- path = self.patch_list[idx]
- data = np.load(path)
-
- X, y = data["X"], data["y"]
-
- X_mag = np.abs(X)
- y_mag = np.abs(y)
-
- return X_mag, y_mag
-
-
-def make_pair(mix_dir, inst_dir):
- input_exts = [".wav", ".m4a", ".mp3", ".mp4", ".flac"]
-
- X_list = sorted(
- [
- os.path.join(mix_dir, fname)
- for fname in os.listdir(mix_dir)
- if os.path.splitext(fname)[1] in input_exts
- ]
- )
- y_list = sorted(
- [
- os.path.join(inst_dir, fname)
- for fname in os.listdir(inst_dir)
- if os.path.splitext(fname)[1] in input_exts
- ]
- )
-
- filelist = list(zip(X_list, y_list))
-
- return filelist
-
-
-def train_val_split(dataset_dir, split_mode, val_rate, val_filelist):
- if split_mode == "random":
- filelist = make_pair(
- os.path.join(dataset_dir, "mixtures"),
- os.path.join(dataset_dir, "instruments"),
- )
-
- random.shuffle(filelist)
-
- if len(val_filelist) == 0:
- val_size = int(len(filelist) * val_rate)
- train_filelist = filelist[:-val_size]
- val_filelist = filelist[-val_size:]
- else:
- train_filelist = [
- pair for pair in filelist if list(pair) not in val_filelist
- ]
- elif split_mode == "subdirs":
- if len(val_filelist) != 0:
- raise ValueError(
- "The `val_filelist` option is not available in `subdirs` mode"
- )
-
- train_filelist = make_pair(
- os.path.join(dataset_dir, "training/mixtures"),
- os.path.join(dataset_dir, "training/instruments"),
- )
-
- val_filelist = make_pair(
- os.path.join(dataset_dir, "validation/mixtures"),
- os.path.join(dataset_dir, "validation/instruments"),
- )
-
- return train_filelist, val_filelist
-
-
-def augment(X, y, reduction_rate, reduction_mask, mixup_rate, mixup_alpha):
- perm = np.random.permutation(len(X))
- for i, idx in enumerate(tqdm(perm)):
- if np.random.uniform() < reduction_rate:
- y[idx] = spec_utils.reduce_vocal_aggressively(
- X[idx], y[idx], reduction_mask
- )
-
- if np.random.uniform() < 0.5:
- # swap channel
- X[idx] = X[idx, ::-1]
- y[idx] = y[idx, ::-1]
- if np.random.uniform() < 0.02:
- # mono
- X[idx] = X[idx].mean(axis=0, keepdims=True)
- y[idx] = y[idx].mean(axis=0, keepdims=True)
- if np.random.uniform() < 0.02:
- # inst
- X[idx] = y[idx]
-
- if np.random.uniform() < mixup_rate and i < len(perm) - 1:
- lam = np.random.beta(mixup_alpha, mixup_alpha)
- X[idx] = lam * X[idx] + (1 - lam) * X[perm[i + 1]]
- y[idx] = lam * y[idx] + (1 - lam) * y[perm[i + 1]]
-
- return X, y
-
-
-def make_padding(width, cropsize, offset):
- left = offset
- roi_size = cropsize - left * 2
- if roi_size == 0:
- roi_size = cropsize
- right = roi_size - (width % roi_size) + left
-
- return left, right, roi_size
-
-
-def make_training_set(filelist, cropsize, patches, sr, hop_length, n_fft, offset):
- len_dataset = patches * len(filelist)
-
- X_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
- y_dataset = np.zeros((len_dataset, 2, n_fft // 2 + 1, cropsize), dtype=np.complex64)
-
- for i, (X_path, y_path) in enumerate(tqdm(filelist)):
- X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
- coef = np.max([np.abs(X).max(), np.abs(y).max()])
- X, y = X / coef, y / coef
-
- l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
- X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
- y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")
-
- starts = np.random.randint(0, X_pad.shape[2] - cropsize, patches)
- ends = starts + cropsize
- for j in range(patches):
- idx = i * patches + j
- X_dataset[idx] = X_pad[:, :, starts[j] : ends[j]]
- y_dataset[idx] = y_pad[:, :, starts[j] : ends[j]]
-
- return X_dataset, y_dataset
-
-
-def make_validation_set(filelist, cropsize, sr, hop_length, n_fft, offset):
- patch_list = []
- patch_dir = "cs{}_sr{}_hl{}_nf{}_of{}".format(
- cropsize, sr, hop_length, n_fft, offset
- )
- os.makedirs(patch_dir, exist_ok=True)
-
- for i, (X_path, y_path) in enumerate(tqdm(filelist)):
- basename = os.path.splitext(os.path.basename(X_path))[0]
-
- X, y = spec_utils.cache_or_load(X_path, y_path, sr, hop_length, n_fft)
- coef = np.max([np.abs(X).max(), np.abs(y).max()])
- X, y = X / coef, y / coef
-
- l, r, roi_size = make_padding(X.shape[2], cropsize, offset)
- X_pad = np.pad(X, ((0, 0), (0, 0), (l, r)), mode="constant")
- y_pad = np.pad(y, ((0, 0), (0, 0), (l, r)), mode="constant")
-
- len_dataset = int(np.ceil(X.shape[2] / roi_size))
- for j in range(len_dataset):
- outpath = os.path.join(patch_dir, "{}_p{}.npz".format(basename, j))
- start = j * roi_size
- if not os.path.exists(outpath):
- np.savez(
- outpath,
- X=X_pad[:, :, start : start + cropsize],
- y=y_pad[:, :, start : start + cropsize],
- )
- patch_list.append(outpath)
-
- return VocalRemoverValidationSet(patch_list)
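For context, the validation set returned above yields (X_mag, y_mag) magnitude-spectrogram pairs loaded from cached .npz patches. The snippet below is an editor's sketch of batching it with a standard PyTorch DataLoader; the directory names and hyperparameter values are placeholders, not taken from this repository:

    import torch

    # hypothetical paths and settings
    filelist = make_pair("dataset/validation/mixtures", "dataset/validation/instruments")
    val_dataset = make_validation_set(
        filelist, cropsize=256, sr=44100, hop_length=1024, n_fft=2048, offset=128
    )
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=False)
    for X_mag, y_mag in val_loader:
        # each tensor: (batch, 2, n_fft // 2 + 1, up to cropsize)
        pass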
diff --git a/spaces/KarloDarlo/3D_Photo_Inpainting/mesh.py b/spaces/KarloDarlo/3D_Photo_Inpainting/mesh.py
deleted file mode 100644
index 95cae5be1c26e517fa4d81bd03325a0f0017f9ad..0000000000000000000000000000000000000000
--- a/spaces/KarloDarlo/3D_Photo_Inpainting/mesh.py
+++ /dev/null
@@ -1,2296 +0,0 @@
-import os
-import numpy as np
-try:
- import cynetworkx as netx
-except ImportError:
- import networkx as netx
-import matplotlib.pyplot as plt
-from functools import partial
-from vispy import scene, io
-from vispy.scene import visuals
-from vispy.visuals.filters import Alpha
-import cv2
-from moviepy.editor import ImageSequenceClip
-from skimage.transform import resize
-import time
-import copy
-import torch
-import os
-from utils import path_planning, open_small_mask, clean_far_edge, refine_depth_around_edge
-from utils import refine_color_around_edge, filter_irrelevant_edge_new, require_depth_edge, clean_far_edge_new
-from utils import create_placeholder, refresh_node, find_largest_rect
-from mesh_tools import get_depth_from_maps, get_map_from_ccs, get_edge_from_nodes, get_depth_from_nodes, get_rgb_from_nodes, crop_maps_by_size, convert2tensor, recursive_add_edge, update_info, filter_edge, relabel_node, depth_inpainting
-from mesh_tools import refresh_bord_depth, enlarge_border, fill_dummy_bord, extrapolate, fill_missing_node, incomplete_node, get_valid_size, dilate_valid_size, size_operation
-import transforms3d
-import random
-from functools import reduce
-
-def create_mesh(depth, image, int_mtx, config):
- H, W, C = image.shape
- ext_H, ext_W = H + 2 * config['extrapolation_thickness'], W + 2 * config['extrapolation_thickness']
- LDI = netx.Graph(H=ext_H, W=ext_W, noext_H=H, noext_W=W, cam_param=int_mtx)
- xy2depth = {}
- int_mtx_pix = int_mtx * np.array([[W], [H], [1.]])
- LDI.graph['cam_param_pix'], LDI.graph['cam_param_pix_inv'] = int_mtx_pix, np.linalg.inv(int_mtx_pix)
- disp = 1. / (-depth)
- LDI.graph['hoffset'], LDI.graph['woffset'] = config['extrapolation_thickness'], config['extrapolation_thickness']
- LDI.graph['bord_up'], LDI.graph['bord_down'] = LDI.graph['hoffset'] + 0, LDI.graph['hoffset'] + H
- LDI.graph['bord_left'], LDI.graph['bord_right'] = LDI.graph['woffset'] + 0, LDI.graph['woffset'] + W
- for idx in range(H):
- for idy in range(W):
- x, y = idx + LDI.graph['hoffset'], idy + LDI.graph['woffset']
- LDI.add_node((x, y, -depth[idx, idy]),
- color=image[idx, idy],
- disp=disp[idx, idy],
- synthesis=False,
- cc_id=set())
- xy2depth[(x, y)] = [-depth[idx, idy]]
- for x, y, d in LDI.nodes:
- two_nes = [ne for ne in [(x+1, y), (x, y+1)] if ne[0] < LDI.graph['bord_down'] and ne[1] < LDI.graph['bord_right']]
- [LDI.add_edge((ne[0], ne[1], xy2depth[ne][0]), (x, y, d)) for ne in two_nes]
- LDI = calculate_fov(LDI)
- image = np.pad(image,
- pad_width=((config['extrapolation_thickness'], config['extrapolation_thickness']),
- (config['extrapolation_thickness'], config['extrapolation_thickness']),
- (0, 0)),
- mode='constant')
- depth = np.pad(depth,
- pad_width=((config['extrapolation_thickness'], config['extrapolation_thickness']),
- (config['extrapolation_thickness'], config['extrapolation_thickness'])),
- mode='constant')
-
- return LDI, xy2depth, image, depth
-
-
-def tear_edges(mesh, threshold = 0.00025, xy2depth=None):
- remove_edge_list = []
- remove_horizon, remove_vertical = np.zeros((2, mesh.graph['H'], mesh.graph['W']))
- mesh_nodes = mesh.nodes
- for edge in mesh.edges:
- if abs(mesh_nodes[edge[0]]['disp'] - mesh_nodes[edge[1]]['disp']) > threshold:
- remove_edge_list.append((edge[0], edge[1]))
-
- near, far = edge if abs(edge[0][2]) < abs(edge[1][2]) else edge[::-1]
-
- mesh_nodes[far]['near'] = [] if mesh_nodes[far].get('near') is None else mesh_nodes[far]['near'].append(near)
- mesh_nodes[near]['far'] = [] if mesh_nodes[near].get('far') is None else mesh_nodes[near]['far'].append(far)
-
- if near[0] == far[0]:
- remove_horizon[near[0], np.minimum(near[1], far[1])] = 1
- elif near[1] == far[1]:
- remove_vertical[np.minimum(near[0], far[0]), near[1]] = 1
- mesh.remove_edges_from(remove_edge_list)
-
- remove_edge_list = []
-
- dang_horizon = np.where(np.roll(remove_horizon, 1, 0) + np.roll(remove_horizon, -1, 0) - remove_horizon == 2)
- dang_vertical = np.where(np.roll(remove_vertical, 1, 1) + np.roll(remove_vertical, -1, 1) - remove_vertical == 2)
-
- horizon_condition = lambda x, y: mesh.graph['bord_up'] + 1 <= x < mesh.graph['bord_down'] - 1
- vertical_condition = lambda x, y: mesh.graph['bord_left'] + 1 <= y < mesh.graph['bord_right'] - 1
-
- prjto3d = lambda x, y: (x, y, xy2depth[(x, y)][0])
-
- node_existence = lambda x, y: mesh.has_node(prjto3d(x, y))
-
- for x, y in zip(dang_horizon[0], dang_horizon[1]):
- if horizon_condition(x, y) and node_existence(x, y) and node_existence(x, y+1):
- remove_edge_list.append((prjto3d(x, y), prjto3d(x, y+1)))
- for x, y in zip(dang_vertical[0], dang_vertical[1]):
- if vertical_condition(x, y) and node_existence(x, y) and node_existence(x+1, y):
- remove_edge_list.append((prjto3d(x, y), prjto3d(x+1, y)))
- mesh.remove_edges_from(remove_edge_list)
-
- return mesh
-
-def calculate_fov(mesh):
- k = mesh.graph['cam_param']
- mesh.graph['hFov'] = 2 * np.arctan(1. / (2*k[0, 0]))
- mesh.graph['vFov'] = 2 * np.arctan(1. / (2*k[1, 1]))
- mesh.graph['aspect'] = mesh.graph['noext_H'] / mesh.graph['noext_W']
-
- return mesh
-
-def calculate_fov_FB(mesh):
- mesh.graph['aspect'] = mesh.graph['H'] / mesh.graph['W']
- if mesh.graph['H'] > mesh.graph['W']:
- mesh.graph['hFov'] = 0.508015513
- half_short = np.tan(mesh.graph['hFov']/2.0)
- half_long = half_short * mesh.graph['aspect']
- mesh.graph['vFov'] = 2.0 * np.arctan(half_long)
- else:
- mesh.graph['vFov'] = 0.508015513
- half_short = np.tan(mesh.graph['vFov']/2.0)
- half_long = half_short / mesh.graph['aspect']
- mesh.graph['hFov'] = 2.0 * np.arctan(half_long)
-
- return mesh
-
-def reproject_3d_int_detail(sx, sy, z, k_00, k_02, k_11, k_12, w_offset, h_offset):
- abs_z = abs(z)
- return [abs_z * ((sy+0.5-w_offset) * k_00 + k_02), abs_z * ((sx+0.5-h_offset) * k_11 + k_12), abs_z]
-
-def reproject_3d_int_detail_FB(sx, sy, z, w_offset, h_offset, mesh):
- if mesh.graph.get('tan_hFov') is None:
- mesh.graph['tan_hFov'] = np.tan(mesh.graph['hFov'] / 2.)
- if mesh.graph.get('tan_vFov') is None:
- mesh.graph['tan_vFov'] = np.tan(mesh.graph['vFov'] / 2.)
-
- ray = np.array([(-1. + 2. * ((sy+0.5-w_offset)/(mesh.graph['W'] - 1))) * mesh.graph['tan_hFov'],
- (1. - 2. * (sx+0.5-h_offset)/(mesh.graph['H'] - 1)) * mesh.graph['tan_vFov'],
- -1])
- point_3d = ray * np.abs(z)
-
- return point_3d
-
-
-def reproject_3d_int(sx, sy, z, mesh):
- k = mesh.graph['cam_param_pix_inv'].copy()
- if k[0, 2] > 0:
- k = np.linalg.inv(k)
- ray = np.dot(k, np.array([sy-mesh.graph['woffset'], sx-mesh.graph['hoffset'], 1]).reshape(3, 1))
-
- point_3d = ray * np.abs(z)
- point_3d = point_3d.flatten()
-
- return point_3d
-
-def generate_init_node(mesh, config, min_node_in_cc):
- mesh_nodes = mesh.nodes
-
- info_on_pix = {}
-
- ccs = sorted(netx.connected_components(mesh), key = len, reverse=True)
- remove_nodes = []
-
- for cc in ccs:
-
- remove_flag = True if len(cc) < min_node_in_cc else False
- if remove_flag is False:
- for (nx, ny, nd) in cc:
- info_on_pix[(nx, ny)] = [{'depth':nd,
- 'color':mesh_nodes[(nx, ny, nd)]['color'],
- 'synthesis':False,
- 'disp':mesh_nodes[(nx, ny, nd)]['disp']}]
- else:
- [remove_nodes.append((nx, ny, nd)) for (nx, ny, nd) in cc]
-
- for node in remove_nodes:
- far_nodes = [] if mesh_nodes[node].get('far') is None else mesh_nodes[node]['far']
- for far_node in far_nodes:
- if mesh.has_node(far_node) and mesh_nodes[far_node].get('near') is not None and node in mesh_nodes[far_node]['near']:
- mesh_nodes[far_node]['near'].remove(node)
- near_nodes = [] if mesh_nodes[node].get('near') is None else mesh_nodes[node]['near']
- for near_node in near_nodes:
- if mesh.has_node(near_node) and mesh_nodes[near_node].get('far') is not None and node in mesh_nodes[near_node]['far']:
- mesh_nodes[near_node]['far'].remove(node)
-
- [mesh.remove_node(node) for node in remove_nodes]
-
- return mesh, info_on_pix
-
-def get_neighbors(mesh, node):
- return [*mesh.neighbors(node)]
-
-def generate_face(mesh, info_on_pix, config):
- H, W = mesh.graph['H'], mesh.graph['W']
- str_faces = []
- num_node = len(mesh.nodes)
- ply_flag = config.get('save_ply')
- def out_fmt(input, cur_id_b, cur_id_self, cur_id_a, ply_flag):
- if ply_flag is True:
- input.append(' '.join(['3', cur_id_b, cur_id_self, cur_id_a]) + '\n')
- else:
- input.append([cur_id_b, cur_id_self, cur_id_a])
- mesh_nodes = mesh.nodes
- for node in mesh_nodes:
- cur_id_self = mesh_nodes[node]['cur_id']
- ne_nodes = get_neighbors(mesh, node)
- four_dir_nes = {'up': [], 'left': [],
- 'down': [], 'right': []}
- for ne_node in ne_nodes:
- store_tuple = [ne_node, mesh_nodes[ne_node]['cur_id']]
- if ne_node[0] == node[0]:
-                if ne_node[1] == node[1] - 1:
- four_dir_nes['left'].append(store_tuple)
- else:
- four_dir_nes['right'].append(store_tuple)
- else:
-                if ne_node[0] == node[0] - 1:
- four_dir_nes['up'].append(store_tuple)
- else:
- four_dir_nes['down'].append(store_tuple)
- for node_a, cur_id_a in four_dir_nes['up']:
- for node_b, cur_id_b in four_dir_nes['right']:
- out_fmt(str_faces, cur_id_b, cur_id_self, cur_id_a, ply_flag)
- for node_a, cur_id_a in four_dir_nes['right']:
- for node_b, cur_id_b in four_dir_nes['down']:
- out_fmt(str_faces, cur_id_b, cur_id_self, cur_id_a, ply_flag)
- for node_a, cur_id_a in four_dir_nes['down']:
- for node_b, cur_id_b in four_dir_nes['left']:
- out_fmt(str_faces, cur_id_b, cur_id_self, cur_id_a, ply_flag)
- for node_a, cur_id_a in four_dir_nes['left']:
- for node_b, cur_id_b in four_dir_nes['up']:
- out_fmt(str_faces, cur_id_b, cur_id_self, cur_id_a, ply_flag)
-
- return str_faces
-
-def reassign_floating_island(mesh, info_on_pix, image, depth):
- H, W = mesh.graph['H'], mesh.graph['W'],
- mesh_nodes = mesh.nodes
- bord_up, bord_down = mesh.graph['bord_up'], mesh.graph['bord_down']
- bord_left, bord_right = mesh.graph['bord_left'], mesh.graph['bord_right']
- W = mesh.graph['W']
- lost_map = np.zeros((H, W))
-
- '''
- (5) is_inside(x, y, xmin, xmax, ymin, ymax) : Check if a pixel(x, y) is inside the border.
- (6) get_cross_nes(x, y) : Get the four cross neighbors of pixel(x, y).
- '''
- key_exist = lambda d, k: k in d
- is_inside = lambda x, y, xmin, xmax, ymin, ymax: xmin <= x < xmax and ymin <= y < ymax
- get_cross_nes = lambda x, y: [(x + 1, y), (x - 1, y), (x, y - 1), (x, y + 1)]
- '''
-    (A) Highlight the pixels on isolated floating islands.
-    (B) Number those isolated floating islands with connected component analysis
-        (a standalone sketch of this step follows this function).
-    (C) For each isolated island:
-        (1) Find the longest depth edge surrounding it.
-        (2) Propagate depth from that depth edge to the pixels on the isolated island.
-        (3) Build the connection between the depth edge and that isolated island.
- '''
- for x in range(H):
- for y in range(W):
- if is_inside(x, y, bord_up, bord_down, bord_left, bord_right) and not(key_exist(info_on_pix, (x, y))):
- lost_map[x, y] = 1
- _, label_lost_map = cv2.connectedComponents(lost_map.astype(np.uint8), connectivity=4)
- mask = np.zeros((H, W))
- mask[bord_up:bord_down, bord_left:bord_right] = 1
-    label_lost_map = (label_lost_map * mask).astype(int)
-
- for i in range(1, label_lost_map.max()+1):
- lost_xs, lost_ys = np.where(label_lost_map == i)
- surr_edge_ids = {}
- for lost_x, lost_y in zip(lost_xs, lost_ys):
- if (lost_x, lost_y) == (295, 389) or (lost_x, lost_y) == (296, 389):
- import pdb; pdb.set_trace()
- for ne in get_cross_nes(lost_x, lost_y):
- if key_exist(info_on_pix, ne):
- for info in info_on_pix[ne]:
- ne_node = (ne[0], ne[1], info['depth'])
- if key_exist(mesh_nodes[ne_node], 'edge_id'):
- edge_id = mesh_nodes[ne_node]['edge_id']
- surr_edge_ids[edge_id] = surr_edge_ids[edge_id] + [ne_node] if \
- key_exist(surr_edge_ids, edge_id) else [ne_node]
- if len(surr_edge_ids) == 0:
- continue
- edge_id, edge_nodes = sorted([*surr_edge_ids.items()], key=lambda x: len(x[1]), reverse=True)[0]
- edge_depth_map = np.zeros((H, W))
- for node in edge_nodes:
- edge_depth_map[node[0], node[1]] = node[2]
- lost_xs, lost_ys = np.where(label_lost_map == i)
- while lost_xs.shape[0] > 0:
- lost_xs, lost_ys = np.where(label_lost_map == i)
- for lost_x, lost_y in zip(lost_xs, lost_ys):
- propagated_depth = []
- real_nes = []
- for ne in get_cross_nes(lost_x, lost_y):
- if not(is_inside(ne[0], ne[1], bord_up, bord_down, bord_left, bord_right)) or \
- edge_depth_map[ne[0], ne[1]] == 0:
- continue
- propagated_depth.append(edge_depth_map[ne[0], ne[1]])
- real_nes.append(ne)
- if len(real_nes) == 0:
- continue
- reassign_depth = np.mean(propagated_depth)
- label_lost_map[lost_x, lost_y] = 0
- edge_depth_map[lost_x, lost_y] = reassign_depth
- depth[lost_x, lost_y] = -reassign_depth
- mesh.add_node((lost_x, lost_y, reassign_depth), color=image[lost_x, lost_y],
- synthesis=False,
- disp=1./reassign_depth,
- cc_id=set())
- info_on_pix[(lost_x, lost_y)] = [{'depth':reassign_depth,
- 'color':image[lost_x, lost_y],
- 'synthesis':False,
- 'disp':1./reassign_depth}]
- new_connections = [((lost_x, lost_y, reassign_depth),
- (ne[0], ne[1], edge_depth_map[ne[0], ne[1]])) for ne in real_nes]
- mesh.add_edges_from(new_connections)
-
- return mesh, info_on_pix, depth
-
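The following is an editor's sketch of step (B) from the docstring in reassign_floating_island above: pixels that lie inside the border but are missing from info_on_pix are marked in a binary map and numbered with 4-connected component analysis. The toy array below is illustrative only.

    import numpy as np
    import cv2

    lost_map = np.zeros((6, 8), dtype=np.uint8)
    lost_map[2:4, 3:6] = 1                       # a small "floating island" of lost pixels
    num_labels, label_lost_map = cv2.connectedComponents(lost_map, connectivity=4)
    for i in range(1, num_labels):               # label 0 is the background
        xs, ys = np.where(label_lost_map == i)
        print(f"island {i}: {len(xs)} pixels")   # each island then gets a propagated depth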
-def remove_node_feat(mesh, *feats):
- mesh_nodes = mesh.nodes
- for node in mesh_nodes:
- for feat in feats:
- mesh_nodes[node][feat] = None
-
- return mesh
-
-def update_status(mesh, info_on_pix, depth=None):
- '''
-    (2) clear_node_feat(G, *fts) : Clear all the node features on graph G.
- (6) get_cross_nes(x, y) : Get the four cross neighbors of pixel(x, y).
- '''
- key_exist = lambda d, k: d.get(k) is not None
- is_inside = lambda x, y, xmin, xmax, ymin, ymax: xmin <= x < xmax and ymin <= y < ymax
- get_cross_nes = lambda x, y: [(x + 1, y), (x - 1, y), (x, y - 1), (x, y + 1)]
- append_element = lambda d, k, x: d[k] + [x] if key_exist(d, k) else [x]
-
- def clear_node_feat(G, fts):
- le_nodes = G.nodes
- for k in le_nodes:
- v = le_nodes[k]
- for ft in fts:
- if ft in v:
- v[ft] = None
-
- clear_node_feat(mesh, ['edge_id', 'far', 'near'])
- bord_up, bord_down = mesh.graph['bord_up'], mesh.graph['bord_down']
- bord_left, bord_right = mesh.graph['bord_left'], mesh.graph['bord_right']
-
- le_nodes = mesh.nodes
-
- for node_key in le_nodes:
- if mesh.neighbors(node_key).__length_hint__() == 4:
- continue
- four_nes = [xx for xx in get_cross_nes(node_key[0], node_key[1]) if
- is_inside(xx[0], xx[1], bord_up, bord_down, bord_left, bord_right) and
- xx in info_on_pix]
- [four_nes.remove((ne_node[0], ne_node[1])) for ne_node in mesh.neighbors(node_key)]
- for ne in four_nes:
- for info in info_on_pix[ne]:
- assert mesh.has_node((ne[0], ne[1], info['depth'])), "No node_key"
- ind_node = le_nodes[node_key]
- if abs(node_key[2]) > abs(info['depth']):
- ind_node['near'] = append_element(ind_node, 'near', (ne[0], ne[1], info['depth']))
- else:
- ind_node['far'] = append_element(ind_node, 'far', (ne[0], ne[1], info['depth']))
- if depth is not None:
- for key, value in info_on_pix.items():
- if depth[key[0], key[1]] != abs(value[0]['depth']):
- value[0]['disp'] = 1. / value[0]['depth']
- depth[key[0], key[1]] = abs(value[0]['depth'])
-
- return mesh, depth, info_on_pix
- else:
- return mesh
-
-def group_edges(LDI, config, image, remove_conflict_ordinal, spdb=False):
-
- '''
- (1) add_new_node(G, node) : add "node" to graph "G"
- (2) add_new_edge(G, node_a, node_b) : add edge "node_a--node_b" to graph "G"
-    (3) exceed_thre(x, y, thre) : Check if the difference between "x" and "y" exceeds threshold "thre"
-    (4) key_exist(d, k) : Check if key "k" exists in dictionary "d"
-    (5) comm_opp_bg(G, x, y) : Check if nodes "x" and "y" in graph "G" treat the same opposite node as background
-    (6) comm_opp_fg(G, x, y) : Check if nodes "x" and "y" in graph "G" treat the same opposite node as foreground
- '''
- add_new_node = lambda G, node: None if G.has_node(node) else G.add_node(node)
- add_new_edge = lambda G, node_a, node_b: None if G.has_edge(node_a, node_b) else G.add_edge(node_a, node_b)
- exceed_thre = lambda x, y, thre: (abs(x) - abs(y)) > thre
- key_exist = lambda d, k: d.get(k) is not None
- comm_opp_bg = lambda G, x, y: key_exist(G.nodes[x], 'far') and key_exist(G.nodes[y], 'far') and \
- not(set(G.nodes[x]['far']).isdisjoint(set(G.nodes[y]['far'])))
- comm_opp_fg = lambda G, x, y: key_exist(G.nodes[x], 'near') and key_exist(G.nodes[y], 'near') and \
- not(set(G.nodes[x]['near']).isdisjoint(set(G.nodes[y]['near'])))
- discont_graph = netx.Graph()
- '''
-    (A) Skip the pixels at the image boundary; we don't want to deal with them.
-    (B) Identify discontinuities by the number of neighbors (degree).
-        If the degree < 4 (up/right/bottom/left), we go through the following steps:
-        (1) Add the discontinuity pixel "node" to graph "discont_graph".
-        (2) Find "node"'s cross neighbors (up/right/bottom/left) "ne_node".
-            - If the cross neighbor "ne_node" is a discontinuity pixel (degree("ne_node") < 4),
-              (a) add it to graph "discont_graph" and build the connection between "ne_node" and "node".
-              (b) label its cross neighbors as invalid pixels "inval_diag_candi" to avoid building
-                  a connection between the original discontinuity pixel "node" and "inval_diag_candi".
-            - Otherwise, find "ne_node"'s cross neighbors, called diagonal candidates "diag_candi".
-            - A "diag_candi" is diagonal to the original discontinuity pixel "node".
-            - If "diag_candi" exists, go to step (3).
-        (3) A diagonal candidate "diag_candi" will be:
-            - added to "discont_graph" if its degree < 4.
-            - connected to the original discontinuity pixel "node" if it satisfies either
-              one of the following criteria:
-              (a) the difference in disparity between "diag_candi" and "node" is smaller than the default threshold.
-              (b) "diag_candi" and "node" face the same opposite pixel. (See function "tear_edges".)
-              (c) "diag_candi" and "node" are in each other's "must_connect" set. (See function "combine_end_node".)
-    (C) Aggregate each connected part of "discont_graph" into "discont_ccs" (a.k.a. depth edges);
-        a standalone sketch of this step follows this function.
- '''
- for node in LDI.nodes:
- if not(LDI.graph['bord_up'] + 1 <= node[0] <= LDI.graph['bord_down'] - 2 and \
- LDI.graph['bord_left'] + 1 <= node[1] <= LDI.graph['bord_right'] - 2):
- continue
- neighbors = [*LDI.neighbors(node)]
- if len(neighbors) < 4:
- add_new_node(discont_graph, node)
- diag_candi_anc, inval_diag_candi, discont_nes = set(), set(), set()
- for ne_node in neighbors:
- if len([*LDI.neighbors(ne_node)]) < 4:
- add_new_node(discont_graph, ne_node)
- add_new_edge(discont_graph, ne_node, node)
- discont_nes.add(ne_node)
- else:
- diag_candi_anc.add(ne_node)
- inval_diag_candi = set([inval_diagonal for ne_node in discont_nes for inval_diagonal in LDI.neighbors(ne_node) if \
- abs(inval_diagonal[0] - node[0]) < 2 and abs(inval_diagonal[1] - node[1]) < 2])
- for ne_node in diag_candi_anc:
- if ne_node[0] == node[0]:
- diagonal_xys = [[ne_node[0] + 1, ne_node[1]], [ne_node[0] - 1, ne_node[1]]]
- elif ne_node[1] == node[1]:
- diagonal_xys = [[ne_node[0], ne_node[1] + 1], [ne_node[0], ne_node[1] - 1]]
- for diag_candi in LDI.neighbors(ne_node):
- if [diag_candi[0], diag_candi[1]] in diagonal_xys and LDI.degree(diag_candi) < 4:
- if diag_candi not in inval_diag_candi:
- if not exceed_thre(1./node[2], 1./diag_candi[2], config['depth_threshold']) or \
- (comm_opp_bg(LDI, diag_candi, node) and comm_opp_fg(LDI, diag_candi, node)):
- add_new_node(discont_graph, diag_candi)
- add_new_edge(discont_graph, diag_candi, node)
- if key_exist(LDI.nodes[diag_candi], 'must_connect') and node in LDI.nodes[diag_candi]['must_connect'] and \
- key_exist(LDI.nodes[node], 'must_connect') and diag_candi in LDI.nodes[node]['must_connect']:
- add_new_node(discont_graph, diag_candi)
- add_new_edge(discont_graph, diag_candi, node)
- if spdb == True:
- import pdb; pdb.set_trace()
- discont_ccs = [*netx.connected_components(discont_graph)]
- '''
-    In some corner cases, a depth edge "discont_cc" will contain both
-    foreground (FG) and background (BG) pixels. This violates the assumption that
-    a depth edge can only be composed of one type of pixel (FG or BG).
-    We need to further divide such a depth edge into several sub-parts so that the
-    assumption is satisfied.
-    (A) A depth edge is invalid if both of its "far_flag" (BG) and
-        "near_flag" (FG) are True.
-    (B) If the depth edge is invalid, we need to:
-        (1) Find the role ("ordinal") of each pixel on the depth edge.
-            "-1" --> Its opposite pixels have smaller depth (near) than it.
-                     It is a background pixel.
-            "+1" --> Its opposite pixels have larger depth (far) than it.
-                     It is a foreground pixel.
-            "0"  --> Some of its opposite pixels have larger depth (far) than it,
-                     and some have smaller depth than it.
-                     It is an ambiguous pixel.
-        (2) For each pixel "discont_node", check if its neighbors' roles are consistent.
-            - If not, break the connection to the neighbor "ne_node" whose role
-              differs from "discont_node".
-            - If yes, remove all the roles that are inconsistent with its neighbors "ne_node".
-        (3) Run connected component analysis to re-identify the divided depth edges.
-    (C) Aggregate each connected part of "discont_graph" into "discont_ccs" (a.k.a. depth edges).
- '''
- if remove_conflict_ordinal:
- new_discont_ccs = []
- num_new_cc = 0
- for edge_id, discont_cc in enumerate(discont_ccs):
- near_flag = False
- far_flag = False
- for discont_node in discont_cc:
- near_flag = True if key_exist(LDI.nodes[discont_node], 'far') else near_flag
- far_flag = True if key_exist(LDI.nodes[discont_node], 'near') else far_flag
- if far_flag and near_flag:
- break
- if far_flag and near_flag:
- for discont_node in discont_cc:
- discont_graph.nodes[discont_node]['ordinal'] = \
- np.array([key_exist(LDI.nodes[discont_node], 'far'),
- key_exist(LDI.nodes[discont_node], 'near')]) * \
- np.array([-1, 1])
- discont_graph.nodes[discont_node]['ordinal'] = \
- np.sum(discont_graph.nodes[discont_node]['ordinal'])
- remove_nodes, remove_edges = [], []
- for discont_node in discont_cc:
- ordinal_relation = np.sum([discont_graph.nodes[xx]['ordinal'] \
- for xx in discont_graph.neighbors(discont_node)])
- near_side = discont_graph.nodes[discont_node]['ordinal'] <= 0
- if abs(ordinal_relation) < len([*discont_graph.neighbors(discont_node)]):
- remove_nodes.append(discont_node)
- for ne_node in discont_graph.neighbors(discont_node):
- remove_flag = (near_side and not(key_exist(LDI.nodes[ne_node], 'far'))) or \
- (not near_side and not(key_exist(LDI.nodes[ne_node], 'near')))
- remove_edges += [(discont_node, ne_node)] if remove_flag else []
- else:
- if near_side and key_exist(LDI.nodes[discont_node], 'near'):
- LDI.nodes[discont_node].pop('near')
- elif not(near_side) and key_exist(LDI.nodes[discont_node], 'far'):
- LDI.nodes[discont_node].pop('far')
- discont_graph.remove_edges_from(remove_edges)
- sub_mesh = discont_graph.subgraph(list(discont_cc)).copy()
- sub_discont_ccs = [*netx.connected_components(sub_mesh)]
- is_redun_near = lambda xx: len(xx) == 1 and xx[0] in remove_nodes and key_exist(LDI.nodes[xx[0]], 'far')
- for sub_discont_cc in sub_discont_ccs:
- if is_redun_near(list(sub_discont_cc)):
- LDI.nodes[list(sub_discont_cc)[0]].pop('far')
- new_discont_ccs.append(sub_discont_cc)
- else:
- new_discont_ccs.append(discont_cc)
- discont_ccs = new_discont_ccs
- new_discont_ccs = None
- if spdb == True:
- import pdb; pdb.set_trace()
-
- for edge_id, edge_cc in enumerate(discont_ccs):
- for node in edge_cc:
- LDI.nodes[node]['edge_id'] = edge_id
-
- return discont_ccs, LDI, discont_graph
-
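As referenced in step (C) of the docstring above, depth edges are simply the connected components of the discontinuity graph. The snippet below is an editor's sketch of that aggregation on a toy graph (networkx is the documented fallback when cynetworkx is unavailable; the pixel coordinates are made up):

    import networkx as netx

    discont_graph = netx.Graph()
    # three discontinuity pixels forming one depth edge, plus one isolated pixel
    discont_graph.add_edges_from([((5, 5, -2.0), (5, 6, -2.0)), ((5, 6, -2.0), (6, 6, -2.1))])
    discont_graph.add_node((20, 3, -7.5))
    discont_ccs = [*netx.connected_components(discont_graph)]
    print(len(discont_ccs))  # 2 depth edges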
-def combine_end_node(mesh, edge_mesh, edge_ccs, depth):
- import collections
- mesh_nodes = mesh.nodes
- connect_dict = dict()
- for valid_edge_id, valid_edge_cc in enumerate(edge_ccs):
- connect_info = []
- for valid_edge_node in valid_edge_cc:
- single_connect = set()
- for ne_node in mesh.neighbors(valid_edge_node):
- if mesh_nodes[ne_node].get('far') is not None:
- for fn in mesh_nodes[ne_node].get('far'):
- if mesh.has_node(fn) and mesh_nodes[fn].get('edge_id') is not None:
- single_connect.add(mesh_nodes[fn]['edge_id'])
- if mesh_nodes[ne_node].get('near') is not None:
- for fn in mesh_nodes[ne_node].get('near'):
- if mesh.has_node(fn) and mesh_nodes[fn].get('edge_id') is not None:
- single_connect.add(mesh_nodes[fn]['edge_id'])
- connect_info.extend([*single_connect])
- connect_dict[valid_edge_id] = collections.Counter(connect_info)
-
- end_maps = np.zeros((mesh.graph['H'], mesh.graph['W']))
- edge_maps = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1
- for valid_edge_id, valid_edge_cc in enumerate(edge_ccs):
- for valid_edge_node in valid_edge_cc:
- edge_maps[valid_edge_node[0], valid_edge_node[1]] = valid_edge_id
- if len([*edge_mesh.neighbors(valid_edge_node)]) == 1:
- num_ne = 1
- if num_ne == 1:
- end_maps[valid_edge_node[0], valid_edge_node[1]] = valid_edge_node[2]
- nxs, nys = np.where(end_maps != 0)
- invalid_nodes = set()
- for nx, ny in zip(nxs, nys):
- if mesh.has_node((nx, ny, end_maps[nx, ny])) is False:
- invalid_nodes.add((nx, ny))
- continue
- four_nes = [xx for xx in [(nx - 1, ny), (nx + 1, ny), (nx, ny - 1), (nx, ny + 1)] \
- if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W'] and \
- end_maps[xx[0], xx[1]] != 0]
- mesh_nes = [*mesh.neighbors((nx, ny, end_maps[nx, ny]))]
- remove_num = 0
- for fne in four_nes:
- if (fne[0], fne[1], end_maps[fne[0], fne[1]]) in mesh_nes:
- remove_num += 1
- if remove_num == len(four_nes):
- invalid_nodes.add((nx, ny))
- for invalid_node in invalid_nodes:
- end_maps[invalid_node[0], invalid_node[1]] = 0
-
- nxs, nys = np.where(end_maps != 0)
- invalid_nodes = set()
- for nx, ny in zip(nxs, nys):
- if mesh_nodes[(nx, ny, end_maps[nx, ny])].get('edge_id') is None:
- continue
- else:
- self_id = mesh_nodes[(nx, ny, end_maps[nx, ny])].get('edge_id')
- self_connect = connect_dict[self_id] if connect_dict.get(self_id) is not None else dict()
- four_nes = [xx for xx in [(nx - 1, ny), (nx + 1, ny), (nx, ny - 1), (nx, ny + 1)] \
- if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W'] and \
- end_maps[xx[0], xx[1]] != 0]
- for fne in four_nes:
- if mesh_nodes[(fne[0], fne[1], end_maps[fne[0], fne[1]])].get('edge_id') is None:
- continue
- else:
- ne_id = mesh_nodes[(fne[0], fne[1], end_maps[fne[0], fne[1]])]['edge_id']
- if self_connect.get(ne_id) is None or self_connect.get(ne_id) == 1:
- continue
- else:
- invalid_nodes.add((nx, ny))
- for invalid_node in invalid_nodes:
- end_maps[invalid_node[0], invalid_node[1]] = 0
- nxs, nys = np.where(end_maps != 0)
- invalid_nodes = set()
- for nx, ny in zip(nxs, nys):
- four_nes = [xx for xx in [(nx - 1, ny), (nx + 1, ny), (nx, ny - 1), (nx, ny + 1)] \
- if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W'] and \
- end_maps[xx[0], xx[1]] != 0]
- for fne in four_nes:
- if mesh.has_node((fne[0], fne[1], end_maps[fne[0], fne[1]])):
- node_a, node_b = (fne[0], fne[1], end_maps[fne[0], fne[1]]), (nx, ny, end_maps[nx, ny])
- mesh.add_edge(node_a, node_b)
- mesh_nodes[node_b]['must_connect'] = set() if mesh_nodes[node_b].get('must_connect') is None else mesh_nodes[node_b]['must_connect']
- mesh_nodes[node_b]['must_connect'].add(node_a)
- mesh_nodes[node_b]['must_connect'] |= set([xx for xx in [*edge_mesh.neighbors(node_a)] if \
- (xx[0] - node_b[0]) < 2 and (xx[1] - node_b[1]) < 2])
- mesh_nodes[node_a]['must_connect'] = set() if mesh_nodes[node_a].get('must_connect') is None else mesh_nodes[node_a]['must_connect']
- mesh_nodes[node_a]['must_connect'].add(node_b)
- mesh_nodes[node_a]['must_connect'] |= set([xx for xx in [*edge_mesh.neighbors(node_b)] if \
- (xx[0] - node_a[0]) < 2 and (xx[1] - node_a[1]) < 2])
- invalid_nodes.add((nx, ny))
- for invalid_node in invalid_nodes:
- end_maps[invalid_node[0], invalid_node[1]] = 0
-
- return mesh
-
-def remove_redundant_edge(mesh, edge_mesh, edge_ccs, info_on_pix, config, redundant_number=1000, invalid=False, spdb=False):
- point_to_amount = {}
- point_to_id = {}
- end_maps = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1
- for valid_edge_id, valid_edge_cc in enumerate(edge_ccs):
- for valid_edge_node in valid_edge_cc:
- point_to_amount[valid_edge_node] = len(valid_edge_cc)
- point_to_id[valid_edge_node] = valid_edge_id
- if edge_mesh.has_node(valid_edge_node) is True:
- if len([*edge_mesh.neighbors(valid_edge_node)]) == 1:
- end_maps[valid_edge_node[0], valid_edge_node[1]] = valid_edge_id
- nxs, nys = np.where(end_maps > -1)
- point_to_adjoint = {}
- for nx, ny in zip(nxs, nys):
- adjoint_edges = set([end_maps[x, y] for x, y in [(nx + 1, ny), (nx - 1, ny), (nx, ny + 1), (nx, ny - 1)] if end_maps[x, y] != -1])
- point_to_adjoint[end_maps[nx, ny]] = (point_to_adjoint[end_maps[nx, ny]] | adjoint_edges) if point_to_adjoint.get(end_maps[nx, ny]) is not None else adjoint_edges
- valid_edge_ccs = filter_edge(mesh, edge_ccs, config, invalid=invalid)
- edge_canvas = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1
- for valid_edge_id, valid_edge_cc in enumerate(valid_edge_ccs):
- for valid_edge_node in valid_edge_cc:
- edge_canvas[valid_edge_node[0], valid_edge_node[1]] = valid_edge_id
- if spdb is True:
- plt.imshow(edge_canvas); plt.show()
- import pdb; pdb.set_trace()
- for valid_edge_id, valid_edge_cc in enumerate(valid_edge_ccs):
- end_number = 0
- four_end_number = 0
- eight_end_number = 0
- db_eight_end_number = 0
- if len(valid_edge_cc) > redundant_number:
- continue
- for valid_edge_node in valid_edge_cc:
- if len([*edge_mesh.neighbors(valid_edge_node)]) == 3:
- break
- elif len([*edge_mesh.neighbors(valid_edge_node)]) == 1:
- hx, hy, hz = valid_edge_node
- if invalid is False:
- eight_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1),
- (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \
- if info_on_pix.get((x, y)) is not None and edge_canvas[x, y] != -1 and edge_canvas[x, y] != valid_edge_id]
- if len(eight_nes) == 0:
- end_number += 1
- if invalid is True:
- four_nes = []; eight_nes = []; db_eight_nes = []
- four_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \
- if info_on_pix.get((x, y)) is not None and edge_canvas[x, y] != -1 and edge_canvas[x, y] != valid_edge_id]
- eight_nes = [(x, y) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), \
- (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \
- if info_on_pix.get((x, y)) is not None and edge_canvas[x, y] != -1 and edge_canvas[x, y] != valid_edge_id]
- db_eight_nes = [(x, y) for x in range(hx - 2, hx + 3) for y in range(hy - 2, hy + 3) \
- if info_on_pix.get((x, y)) is not None and edge_canvas[x, y] != -1 and edge_canvas[x, y] != valid_edge_id and (x, y) != (hx, hy)]
- if len(four_nes) == 0 or len(eight_nes) == 0:
- end_number += 1
- if len(four_nes) == 0:
- four_end_number += 1
- if len(eight_nes) == 0:
- eight_end_number += 1
- if len(db_eight_nes) == 0:
- db_eight_end_number += 1
- elif len([*edge_mesh.neighbors(valid_edge_node)]) == 0:
- hx, hy, hz = valid_edge_node
- four_nes = [(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \
- if info_on_pix.get((x, y)) is not None and \
- mesh.has_edge(valid_edge_node, (x, y, info_on_pix[(x, y)][0]['depth'])) is False]
- for ne in four_nes:
- try:
- if invalid is True or (point_to_amount.get(ne) is None or point_to_amount[ne] < redundant_number) or \
- point_to_id[ne] in point_to_adjoint.get(point_to_id[valid_edge_node], set()):
- mesh.add_edge(valid_edge_node, ne)
- except:
- import pdb; pdb.set_trace()
- if (invalid is not True and end_number >= 1) or (invalid is True and end_number >= 2 and eight_end_number >= 1 and db_eight_end_number >= 1):
- for valid_edge_node in valid_edge_cc:
- hx, hy, _ = valid_edge_node
- four_nes = [(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \
- if info_on_pix.get((x, y)) is not None and \
- mesh.has_edge(valid_edge_node, (x, y, info_on_pix[(x, y)][0]['depth'])) is False and \
- (edge_canvas[x, y] == -1 or edge_canvas[x, y] == valid_edge_id)]
- for ne in four_nes:
- if invalid is True or (point_to_amount.get(ne) is None or point_to_amount[ne] < redundant_number) or \
- point_to_id[ne] in point_to_adjoint.get(point_to_id[valid_edge_node], set()):
- mesh.add_edge(valid_edge_node, ne)
-
- return mesh
-
-def judge_dangle(mark, mesh, node):
- if not (1 <= node[0] < mesh.graph['H']-1) or not(1 <= node[1] < mesh.graph['W']-1):
- return mark
- mesh_neighbors = [*mesh.neighbors(node)]
- mesh_neighbors = [xx for xx in mesh_neighbors if 0 < xx[0] < mesh.graph['H'] - 1 and 0 < xx[1] < mesh.graph['W'] - 1]
- if len(mesh_neighbors) >= 3:
- return mark
- elif len(mesh_neighbors) <= 1:
- mark[node[0], node[1]] = (len(mesh_neighbors) + 1)
- else:
- dan_ne_node_a = mesh_neighbors[0]
- dan_ne_node_b = mesh_neighbors[1]
- if abs(dan_ne_node_a[0] - dan_ne_node_b[0]) > 1 or \
- abs(dan_ne_node_a[1] - dan_ne_node_b[1]) > 1:
- mark[node[0], node[1]] = 3
-
- return mark
-
-def remove_dangling(mesh, edge_ccs, edge_mesh, info_on_pix, image, depth, config):
-
- tmp_edge_ccs = copy.deepcopy(edge_ccs)
- for edge_cc_id, valid_edge_cc in enumerate(tmp_edge_ccs):
- if len(valid_edge_cc) > 1 or len(valid_edge_cc) == 0:
- continue
- single_edge_node = [*valid_edge_cc][0]
- hx, hy, hz = single_edge_node
- eight_nes = set([(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1),
- (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \
- if info_on_pix.get((x, y)) is not None])
- four_nes = [(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \
- if info_on_pix.get((x, y)) is not None]
- sub_mesh = mesh.subgraph(eight_nes).copy()
- ccs = netx.connected_components(sub_mesh)
- four_ccs = []
- for cc_id, _cc in enumerate(ccs):
- four_ccs.append(set())
- for cc_node in _cc:
- if abs(cc_node[0] - hx) + abs(cc_node[1] - hy) < 2:
- four_ccs[cc_id].add(cc_node)
- largest_cc = sorted(four_ccs, key=lambda x: (len(x), -np.sum([abs(xx[2] - hz) for xx in x])))[-1]
- if len(largest_cc) < 2:
- for ne in four_nes:
- mesh.add_edge(single_edge_node, ne)
- else:
- mesh.remove_edges_from([(single_edge_node, ne) for ne in mesh.neighbors(single_edge_node)])
- new_depth = np.mean([xx[2] for xx in largest_cc])
- info_on_pix[(hx, hy)][0]['depth'] = new_depth
- info_on_pix[(hx, hy)][0]['disp'] = 1./new_depth
- new_node = (hx, hy, new_depth)
-            mesh = refresh_node(single_edge_node, mesh.nodes[single_edge_node], new_node, dict(), mesh)
- edge_ccs[edge_cc_id] = set([new_node])
- for ne in largest_cc:
- mesh.add_edge(new_node, ne)
-
- mark = np.zeros((mesh.graph['H'], mesh.graph['W']))
- for edge_idx, edge_cc in enumerate(edge_ccs):
- for edge_node in edge_cc:
- if not (mesh.graph['bord_up'] <= edge_node[0] < mesh.graph['bord_down']-1) or \
- not (mesh.graph['bord_left'] <= edge_node[1] < mesh.graph['bord_right']-1):
- continue
- mesh_neighbors = [*mesh.neighbors(edge_node)]
- mesh_neighbors = [xx for xx in mesh_neighbors \
- if mesh.graph['bord_up'] < xx[0] < mesh.graph['bord_down'] - 1 and \
- mesh.graph['bord_left'] < xx[1] < mesh.graph['bord_right'] - 1]
- if len([*mesh.neighbors(edge_node)]) >= 3:
- continue
- elif len([*mesh.neighbors(edge_node)]) <= 1:
- mark[edge_node[0], edge_node[1]] += (len([*mesh.neighbors(edge_node)]) + 1)
- else:
- dan_ne_node_a = [*mesh.neighbors(edge_node)][0]
- dan_ne_node_b = [*mesh.neighbors(edge_node)][1]
- if abs(dan_ne_node_a[0] - dan_ne_node_b[0]) > 1 or \
- abs(dan_ne_node_a[1] - dan_ne_node_b[1]) > 1:
- mark[edge_node[0], edge_node[1]] += 3
- mxs, mys = np.where(mark == 1)
- conn_0_nodes = [(x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']) for x in zip(mxs, mys) \
- if mesh.has_node((x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']))]
- mxs, mys = np.where(mark == 2)
- conn_1_nodes = [(x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']) for x in zip(mxs, mys) \
- if mesh.has_node((x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']))]
- for node in conn_0_nodes:
- hx, hy = node[0], node[1]
- four_nes = [(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] \
- if info_on_pix.get((x, y)) is not None]
- re_depth = {'value' : 0, 'count': 0}
- for ne in four_nes:
- mesh.add_edge(node, ne)
-            re_depth['value'] += ne[2]
- re_depth['count'] += 1.
- re_depth = re_depth['value'] / re_depth['count']
- mapping_dict = {node: (node[0], node[1], re_depth)}
- info_on_pix, mesh, edge_mesh = update_info(mapping_dict, info_on_pix, mesh, edge_mesh)
- depth[node[0], node[1]] = abs(re_depth)
- mark[node[0], node[1]] = 0
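- # mark == 2 (one neighbour): detach the node, re-home it onto the largest connected component
- # in its 8-neighbourhood (excluding pixels already reachable through its current neighbour),
- # average its depth over that component, and let recursive_add_edge extend the repair.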
- for node in conn_1_nodes:
- hx, hy = node[0], node[1]
- eight_nes = set([(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1),
- (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \
- if info_on_pix.get((x, y)) is not None])
- self_nes = set([ne2 for ne1 in mesh.neighbors(node) for ne2 in mesh.neighbors(ne1) if ne2 in eight_nes])
- eight_nes = [*(eight_nes - self_nes)]
- sub_mesh = mesh.subgraph(eight_nes).copy()
- ccs = netx.connected_components(sub_mesh)
- largest_cc = sorted(ccs, key=lambda x: (len(x), -np.sum([abs(xx[0] - node[0]) + abs(xx[1] - node[1]) for xx in x])))[-1]
-
- mesh.remove_edges_from([(xx, node) for xx in mesh.neighbors(node)])
- re_depth = {'value' : 0, 'count': 0}
- for cc_node in largest_cc:
- if cc_node[0] == node[0] and cc_node[1] == node[1]:
- continue
- re_depth['value'] += cc_node[2]
- re_depth['count'] += 1.
- if abs(cc_node[0] - node[0]) + abs(cc_node[1] - node[1]) < 2:
- mesh.add_edge(cc_node, node)
- try:
- re_depth = re_depth['value'] / re_depth['count']
- except:
- re_depth = node[2]
- renode = (node[0], node[1], re_depth)
- mapping_dict = {node: renode}
- info_on_pix, mesh, edge_mesh = update_info(mapping_dict, info_on_pix, mesh, edge_mesh)
- depth[node[0], node[1]] = abs(re_depth)
- mark[node[0], node[1]] = 0
- edge_mesh, mesh, mark, info_on_pix = recursive_add_edge(edge_mesh, mesh, info_on_pix, renode, mark)
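- # mark == 3: chains of degree-2 nodes whose two neighbours are not adjacent. Rewire each chain
- # endpoint into the neighbouring component that contains its outside neighbour; a singleton
- # chain is merged into whichever surrounding component it touches.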
- mxs, mys = np.where(mark == 3)
- conn_2_nodes = [(x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth']) for x in zip(mxs, mys) \
- if mesh.has_node((x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth'])) and \
- mesh.degree((x[0], x[1], info_on_pix[(x[0], x[1])][0]['depth'])) == 2]
- sub_mesh = mesh.subgraph(conn_2_nodes).copy()
- ccs = netx.connected_components(sub_mesh)
- for cc in ccs:
- candidate_nodes = [xx for xx in cc if sub_mesh.degree(xx) == 1]
- for node in candidate_nodes:
- if not mesh.has_node(node):
- continue
- ne_node = [xx for xx in mesh.neighbors(node) if xx not in cc][0]
- hx, hy = node[0], node[1]
- eight_nes = set([(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1),
- (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \
- if info_on_pix.get((x, y)) is not None and (x, y, info_on_pix[(x, y)][0]['depth']) not in cc])
- ne_sub_mesh = mesh.subgraph(eight_nes).copy()
- ne_ccs = netx.connected_components(ne_sub_mesh)
- try:
- ne_cc = [ne_cc for ne_cc in ne_ccs if ne_node in ne_cc][0]
- except:
- import pdb; pdb.set_trace()
- largest_cc = [xx for xx in ne_cc if abs(xx[0] - node[0]) + abs(xx[1] - node[1]) == 1]
- mesh.remove_edges_from([(xx, node) for xx in mesh.neighbors(node)])
- re_depth = {'value' : 0, 'count': 0}
- for cc_node in largest_cc:
- re_depth['value'] += cc_node[2]
- re_depth['count'] += 1.
- mesh.add_edge(cc_node, node)
- try:
- re_depth = re_depth['value'] / re_depth['count']
- except:
- re_depth = node[2]
- renode = (node[0], node[1], re_depth)
- mapping_dict = {node: renode}
- info_on_pix, mesh, edge_mesh = update_info(mapping_dict, info_on_pix, mesh, edge_mesh)
- depth[node[0], node[1]] = abs(re_depth)
- mark[node[0], node[1]] = 0
- edge_mesh, mesh, mark, info_on_pix = recursive_add_edge(edge_mesh, mesh, info_on_pix, renode, mark)
- break
- if len(cc) == 1:
- node = [node for node in cc][0]
- hx, hy = node[0], node[1]
- nine_nes = set([(x, y, info_on_pix[(x, y)][0]['depth']) for x, y in [(hx, hy), (hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1),
- (hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)] \
- if info_on_pix.get((x, y)) is not None and mesh.has_node((x, y, info_on_pix[(x, y)][0]['depth']))])
- ne_sub_mesh = mesh.subgraph(nine_nes).copy()
- ne_ccs = netx.connected_components(ne_sub_mesh)
- for ne_cc in ne_ccs:
- if node in ne_cc:
- re_depth = {'value' : 0, 'count': 0}
- for ne in ne_cc:
- if abs(ne[0] - node[0]) + abs(ne[1] - node[1]) == 1:
- mesh.add_edge(node, ne)
- re_depth['value'] += ne[2]
- re_depth['count'] += 1.
- re_depth = re_depth['value'] / re_depth['count']
- mapping_dict = {node: (node[0], node[1], re_depth)}
- info_on_pix, mesh, edge_mesh = update_info(mapping_dict, info_on_pix, mesh, edge_mesh)
- depth[node[0], node[1]] = abs(re_depth)
- mark[node[0], node[1]] = 0
-
-
- return mesh, info_on_pix, edge_mesh, depth, mark
-
-def context_and_holes(mesh, edge_ccs, config, specific_edge_id, specific_edge_loc, depth_feat_model,
- connect_points_ccs=None, inpaint_iter=0, filter_edge=False, vis_edge_id=None):
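- # For every depth-edge connected component, grow a hole ("mask") region on the background side
- # and a "context" region on the foreground side by iterative dilation over the mesh, and derive
- # the eroded / extended variants consumed by DL_inpaint_edge.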
- edge_maps = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1
- mask_info = {}
- for edge_id, edge_cc in enumerate(edge_ccs):
- for edge_node in edge_cc:
- edge_maps[edge_node[0], edge_node[1]] = edge_id
-
- context_ccs = [set() for x in range(len(edge_ccs))]
- extend_context_ccs = [set() for x in range(len(edge_ccs))]
- extend_erode_context_ccs = [set() for x in range(len(edge_ccs))]
- extend_edge_ccs = [set() for x in range(len(edge_ccs))]
- accomp_extend_context_ccs = [set() for x in range(len(edge_ccs))]
- erode_context_ccs = [set() for x in range(len(edge_ccs))]
- broken_mask_ccs = [set() for x in range(len(edge_ccs))]
- invalid_extend_edge_ccs = [set() for x in range(len(edge_ccs))]
- intouched_ccs = [set() for x in range(len(edge_ccs))]
- redundant_ccs = [set() for x in range(len(edge_ccs))]
- if inpaint_iter == 0:
- background_thickness = config['background_thickness']
- context_thickness = config['context_thickness']
- else:
- background_thickness = config['background_thickness_2']
- context_thickness = config['context_thickness_2']
-
- mesh_nodes = mesh.nodes
- for edge_id, edge_cc in enumerate(edge_ccs):
- if context_thickness == 0 or (len(specific_edge_id) > 0 and edge_id not in specific_edge_id):
- continue
- edge_group = {}
- for edge_node in edge_cc:
- far_nodes = mesh_nodes[edge_node].get('far')
- if far_nodes is None:
- continue
- for far_node in far_nodes:
- if far_node in edge_cc:
- continue
- context_ccs[edge_id].add(far_node)
- if mesh_nodes[far_node].get('edge_id') is not None:
- if edge_group.get(mesh_nodes[far_node]['edge_id']) is None:
- edge_group[mesh_nodes[far_node]['edge_id']] = set()
- edge_group[mesh_nodes[far_node]['edge_id']].add(far_node)
- if len(edge_cc) > 2:
- for edge_key in [*edge_group.keys()]:
- if len(edge_group[edge_key]) == 1:
- context_ccs[edge_id].remove([*edge_group[edge_key]][0])
- for edge_id, edge_cc in enumerate(edge_ccs):
- if inpaint_iter != 0:
- continue
- tmp_intouched_nodes = set()
- for edge_node in edge_cc:
- raw_intouched_nodes = set(mesh_nodes[edge_node].get('near')) if mesh_nodes[edge_node].get('near') is not None else set()
- tmp_intouched_nodes |= set([xx for xx in raw_intouched_nodes if mesh_nodes[xx].get('edge_id') is not None and \
- len(context_ccs[mesh_nodes[xx].get('edge_id')]) > 0])
- intouched_ccs[edge_id] |= tmp_intouched_nodes
- tmp_intouched_nodes = None
- mask_ccs = copy.deepcopy(edge_ccs)
- forbidden_len = 3
- forbidden_map = np.ones((mesh.graph['H'] - forbidden_len, mesh.graph['W'] - forbidden_len))
- forbidden_map = np.pad(forbidden_map, ((forbidden_len, forbidden_len), (forbidden_len, forbidden_len)), mode='constant').astype(bool)
- cur_tmp_mask_map = np.zeros_like(forbidden_map).astype(bool)
- passive_background = 10  # mask-dilation step at which intersection tracking starts (inpaint_iter == 0)
- passive_context = 1  # context-dilation step at which intersection tracking starts (inpaint_iter == 1)
-
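- # Main dilation loop: ring by ring, tmp_mask_nodes grows the hole and tmp_context_nodes grows
- # the context, while the intouched / redundant / noncont frontiers keep the growth from leaking
- # across other edges or outside the forbidden border.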
- for edge_id, edge_cc in enumerate(edge_ccs):
- cur_mask_cc = None; cur_mask_cc = []
- cur_context_cc = None; cur_context_cc = []
- cur_accomp_near_cc = None; cur_accomp_near_cc = []
- cur_invalid_extend_edge_cc = None; cur_invalid_extend_edge_cc = []
- cur_comp_far_cc = None; cur_comp_far_cc = []
- tmp_erode = []
- if len(context_ccs[edge_id]) == 0 or (len(specific_edge_id) > 0 and edge_id not in specific_edge_id):
- continue
- for i in range(max(background_thickness, context_thickness)):
- cur_tmp_mask_map.fill(False)
- if i == 0:
- tmp_mask_nodes = copy.deepcopy(mask_ccs[edge_id])
- tmp_intersect_nodes = []
- tmp_intersect_context_nodes = []
- mask_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=bool)
- context_depth = np.zeros((mesh.graph['H'], mesh.graph['W']))
- comp_cnt_depth = np.zeros((mesh.graph['H'], mesh.graph['W']))
- connect_map = np.zeros((mesh.graph['H'], mesh.graph['W']))
- for node in tmp_mask_nodes:
- mask_map[node[0], node[1]] = True
- depth_count = 0
- if mesh_nodes[node].get('far') is not None:
- for comp_cnt_node in mesh_nodes[node]['far']:
- comp_cnt_depth[node[0], node[1]] += abs(comp_cnt_node[2])
- depth_count += 1
- if depth_count > 0:
- comp_cnt_depth[node[0], node[1]] = comp_cnt_depth[node[0], node[1]] / depth_count
- connect_node = []
- if mesh_nodes[node].get('connect_point_id') is not None:
- connect_node.append(mesh_nodes[node]['connect_point_id'])
- connect_point_id = np.bincount(connect_node).argmax() if len(connect_node) > 0 else -1
- if connect_point_id > -1 and connect_points_ccs is not None:
- for xx in connect_points_ccs[connect_point_id]:
- if connect_map[xx[0], xx[1]] == 0:
- connect_map[xx[0], xx[1]] = xx[2]
- if mesh_nodes[node].get('connect_point_exception') is not None:
- for xx in mesh_nodes[node]['connect_point_exception']:
- if connect_map[xx[0], xx[1]] == 0:
- connect_map[xx[0], xx[1]] = xx[2]
- tmp_context_nodes = [*context_ccs[edge_id]]
- tmp_erode.append([*context_ccs[edge_id]])
- context_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=bool)
- if (context_map.astype(np.uint8) * mask_map.astype(np.uint8)).max() > 0:
- import pdb; pdb.set_trace()
- for node in tmp_context_nodes:
- context_map[node[0], node[1]] = True
- context_depth[node[0], node[1]] = node[2]
- context_map[mask_map == True] = False
- if (context_map.astype(np.uint8) * mask_map.astype(np.uint8)).max() > 0:
- import pdb; pdb.set_trace()
- tmp_intouched_nodes = [*intouched_ccs[edge_id]]
- intouched_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=bool)
- for node in tmp_intouched_nodes: intouched_map[node[0], node[1]] = True
- intouched_map[mask_map == True] = False
- tmp_redundant_nodes = set()
- tmp_noncont_nodes = set()
- noncont_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=bool)
- intersect_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=bool)
- intersect_context_map = np.zeros((mesh.graph['H'], mesh.graph['W']), dtype=bool)
- if i > passive_background and inpaint_iter == 0:
- new_tmp_intersect_nodes = None
- new_tmp_intersect_nodes = []
- for node in tmp_intersect_nodes:
- nes = mesh.neighbors(node)
- for ne in nes:
- if bool(context_map[ne[0], ne[1]]) is False and \
- bool(mask_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True and \
- bool(intouched_map[ne[0], ne[1]]) is False and\
- bool(intersect_map[ne[0], ne[1]]) is False and\
- bool(intersect_context_map[ne[0], ne[1]]) is False:
- break_flag = False
- if (i - passive_background) % 2 == 0 and (i - passive_background) % 8 != 0:
- four_nes = [xx for xx in[[ne[0] - 1, ne[1]], [ne[0] + 1, ne[1]], [ne[0], ne[1] - 1], [ne[0], ne[1] + 1]] \
- if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W']]
- for fne in four_nes:
- if bool(mask_map[fne[0], fne[1]]) is True:
- break_flag = True
- break
- if break_flag is True:
- continue
- intersect_map[ne[0], ne[1]] = True
- new_tmp_intersect_nodes.append(ne)
- tmp_intersect_nodes = None
- tmp_intersect_nodes = new_tmp_intersect_nodes
-
- if i > passive_context and inpaint_iter == 1:
- new_tmp_intersect_context_nodes = None
- new_tmp_intersect_context_nodes = []
- for node in tmp_intersect_context_nodes:
- nes = mesh.neighbors(node)
- for ne in nes:
- if bool(context_map[ne[0], ne[1]]) is False and \
- bool(mask_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True and \
- bool(intouched_map[ne[0], ne[1]]) is False and\
- bool(intersect_map[ne[0], ne[1]]) is False and \
- bool(intersect_context_map[ne[0], ne[1]]) is False:
- intersect_context_map[ne[0], ne[1]] = True
- new_tmp_intersect_context_nodes.append(ne)
- tmp_intersect_context_nodes = None
- tmp_intersect_context_nodes = new_tmp_intersect_context_nodes
-
- new_tmp_mask_nodes = None
- new_tmp_mask_nodes = []
- for node in tmp_mask_nodes:
- four_nes = {xx:[] for xx in [(node[0] - 1, node[1]), (node[0] + 1, node[1]), (node[0], node[1] - 1), (node[0], node[1] + 1)] if \
- 0 <= xx[0] < connect_map.shape[0] and 0 <= xx[1] < connect_map.shape[1]}
- if inpaint_iter > 0:
- for ne in four_nes.keys():
- if connect_map[ne[0], ne[1]] == True:
- tmp_context_nodes.append((ne[0], ne[1], connect_map[ne[0], ne[1]]))
- context_map[ne[0], ne[1]] = True
- nes = mesh.neighbors(node)
- if inpaint_iter > 0:
- for ne in nes: four_nes[(ne[0], ne[1])].append(ne[2])
- nes = []
- for kfne, vfnes in four_nes.items(): vfnes.sort(key = lambda xx: abs(xx), reverse=True)
- for kfne, vfnes in four_nes.items():
- for vfne in vfnes: nes.append((kfne[0], kfne[1], vfne))
- for ne in nes:
- if bool(context_map[ne[0], ne[1]]) is False and \
- bool(mask_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True and \
- bool(intouched_map[ne[0], ne[1]]) is False and \
- bool(intersect_map[ne[0], ne[1]]) is False and \
- bool(intersect_context_map[ne[0], ne[1]]) is False:
- if i == passive_background and inpaint_iter == 0:
- if np.any(context_map[max(ne[0] - 1, 0):min(ne[0] + 2, mesh.graph['H']), max(ne[1] - 1, 0):min(ne[1] + 2, mesh.graph['W'])]) == True:
- intersect_map[ne[0], ne[1]] = True
- tmp_intersect_nodes.append(ne)
- continue
- if i < background_thickness:
- if inpaint_iter == 0:
- cur_mask_cc.append(ne)
- elif mesh_nodes[ne].get('inpaint_id') == 1:
- cur_mask_cc.append(ne)
- else:
- continue
- mask_ccs[edge_id].add(ne)
- if inpaint_iter == 0:
- if comp_cnt_depth[node[0], node[1]] > 0 and comp_cnt_depth[ne[0], ne[1]] == 0:
- comp_cnt_depth[ne[0], ne[1]] = comp_cnt_depth[node[0], node[1]]
- if mesh_nodes[ne].get('far') is not None:
- for comp_far_node in mesh_nodes[ne]['far']:
- cur_comp_far_cc.append(comp_far_node)
- cur_accomp_near_cc.append(ne)
- cur_invalid_extend_edge_cc.append(comp_far_node)
- if mesh_nodes[ne].get('edge_id') is not None and \
- len(context_ccs[mesh_nodes[ne].get('edge_id')]) > 0:
- intouched_fars = set(mesh_nodes[ne].get('far')) if mesh_nodes[ne].get('far') is not None else set()
- accum_intouched_fars = set(intouched_fars)
- for intouched_far in intouched_fars:
- accum_intouched_fars |= set([*mesh.neighbors(intouched_far)])
- for intouched_far in accum_intouched_fars:
- if bool(mask_map[intouched_far[0], intouched_far[1]]) is True or \
- bool(context_map[intouched_far[0], intouched_far[1]]) is True:
- continue
- tmp_redundant_nodes.add(intouched_far)
- intouched_map[intouched_far[0], intouched_far[1]] = True
- if mesh_nodes[ne].get('near') is not None:
- intouched_nears = set(mesh_nodes[ne].get('near'))
- for intouched_near in intouched_nears:
- if bool(mask_map[intouched_near[0], intouched_near[1]]) is True or \
- bool(context_map[intouched_near[0], intouched_near[1]]) is True:
- continue
- tmp_redundant_nodes.add(intouched_near)
- intouched_map[intouched_near[0], intouched_near[1]] = True
- if not (mesh_nodes[ne].get('inpaint_id') != 1 and inpaint_iter == 1):
- new_tmp_mask_nodes.append(ne)
- mask_map[ne[0], ne[1]] = True
- tmp_mask_nodes = new_tmp_mask_nodes
-
- new_tmp_context_nodes = None
- new_tmp_context_nodes = []
- for node in tmp_context_nodes:
- nes = mesh.neighbors(node)
- if inpaint_iter > 0:
- four_nes = {(node[0] - 1, node[1]):[], (node[0] + 1, node[1]):[], (node[0], node[1] - 1):[], (node[0], node[1] + 1):[]}
- for ne in nes: four_nes[(ne[0], ne[1])].append(ne[2])
- nes = []
- for kfne, vfnes in four_nes.items(): vfnes.sort(key = lambda xx: abs(xx), reverse=True)
- for kfne, vfnes in four_nes.items():
- for vfne in vfnes: nes.append((kfne[0], kfne[1], vfne))
- for ne in nes:
- mask_flag = (bool(mask_map[ne[0], ne[1]]) is False)
- if bool(context_map[ne[0], ne[1]]) is False and mask_flag and \
- bool(forbidden_map[ne[0], ne[1]]) is True and bool(noncont_map[ne[0], ne[1]]) is False and \
- bool(intersect_context_map[ne[0], ne[1]]) is False:
- if i == passive_context and inpaint_iter == 1:
- mnes = mesh.neighbors(ne)
- if any([mask_map[mne[0], mne[1]] == True for mne in mnes]) is True:
- intersect_context_map[ne[0], ne[1]] = True
- tmp_intersect_context_nodes.append(ne)
- continue
- if False and mesh_nodes[ne].get('near') is not None and mesh_nodes[ne].get('edge_id') != edge_id:
- noncont_nears = set(mesh_nodes[ne].get('near'))
- for noncont_near in noncont_nears:
- if bool(context_map[noncont_near[0], noncont_near[1]]) is False:
- tmp_noncont_nodes.add(noncont_near)
- noncont_map[noncont_near[0], noncont_near[1]] = True
- new_tmp_context_nodes.append(ne)
- context_map[ne[0], ne[1]] = True
- context_depth[ne[0], ne[1]] = ne[2]
- cur_context_cc.extend(new_tmp_context_nodes)
- tmp_erode.append(new_tmp_context_nodes)
- tmp_context_nodes = None
- tmp_context_nodes = new_tmp_context_nodes
- new_tmp_intouched_nodes = None; new_tmp_intouched_nodes = []
-
- for node in tmp_intouched_nodes:
- if bool(context_map[node[0], node[1]]) is True or bool(mask_map[node[0], node[1]]) is True:
- continue
- nes = mesh.neighbors(node)
-
- for ne in nes:
- if bool(context_map[ne[0], ne[1]]) is False and \
- bool(mask_map[ne[0], ne[1]]) is False and \
- bool(intouched_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True:
- new_tmp_intouched_nodes.append(ne)
- intouched_map[ne[0], ne[1]] = True
- tmp_intouched_nodes = None
- tmp_intouched_nodes = set(new_tmp_intouched_nodes)
- new_tmp_redundant_nodes = None; new_tmp_redundant_nodes = []
- for node in tmp_redundant_nodes:
- if bool(context_map[node[0], node[1]]) is True or \
- bool(mask_map[node[0], node[1]]) is True:
- continue
- nes = mesh.neighbors(node)
-
- for ne in nes:
- if bool(context_map[ne[0], ne[1]]) is False and \
- bool(mask_map[ne[0], ne[1]]) is False and \
- bool(intouched_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True:
- new_tmp_redundant_nodes.append(ne)
- intouched_map[ne[0], ne[1]] = True
- tmp_redundant_nodes = None
- tmp_redundant_nodes = set(new_tmp_redundant_nodes)
- new_tmp_noncont_nodes = None; new_tmp_noncont_nodes = []
- for node in tmp_noncont_nodes:
- if bool(context_map[node[0], node[1]]) is True or \
- bool(mask_map[node[0], node[1]]) is True:
- continue
- nes = mesh.neighbors(node)
- rmv_flag = False
- for ne in nes:
- if bool(context_map[ne[0], ne[1]]) is False and \
- bool(mask_map[ne[0], ne[1]]) is False and \
- bool(noncont_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True:
- patch_context_map = context_map[max(ne[0] - 1, 0):min(ne[0] + 2, context_map.shape[0]),
- max(ne[1] - 1, 0):min(ne[1] + 2, context_map.shape[1])]
- if bool(np.any(patch_context_map)) is True:
- new_tmp_noncont_nodes.append(ne)
- noncont_map[ne[0], ne[1]] = True
- tmp_noncont_nodes = None
- tmp_noncont_nodes = set(new_tmp_noncont_nodes)
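- # First inpainting pass only: inpaint depth over the grown context to estimate the depth behind
- # the hole, then keep only the far-side nodes that really lie behind an inpainted near depth
- # (and the near-side pixels adjacent to them).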
- if inpaint_iter == 0:
- depth_dict = get_depth_from_maps(context_map, mask_map, context_depth, mesh.graph['H'], mesh.graph['W'], log_depth=config['log_depth'])
- mask_size = get_valid_size(depth_dict['mask'])
- mask_size = dilate_valid_size(mask_size, depth_dict['mask'], dilate=[20, 20])
- context_size = get_valid_size(depth_dict['context'])
- context_size = dilate_valid_size(context_size, depth_dict['context'], dilate=[20, 20])
- union_size = size_operation(mask_size, context_size, operation='+')
- depth_dict = depth_inpainting(None, None, None, None, mesh, config, union_size, depth_feat_model, None, given_depth_dict=depth_dict, spdb=False)
- near_depth_map, raw_near_depth_map = np.zeros((mesh.graph['H'], mesh.graph['W'])), np.zeros((mesh.graph['H'], mesh.graph['W']))
- filtered_comp_far_cc, filtered_accomp_near_cc = set(), set()
- for node in cur_accomp_near_cc:
- near_depth_map[node[0], node[1]] = depth_dict['output'][node[0], node[1]]
- raw_near_depth_map[node[0], node[1]] = node[2]
- for node in cur_comp_far_cc:
- four_nes = [xx for xx in [(node[0] - 1, node[1]), (node[0] + 1, node[1]), (node[0], node[1] - 1), (node[0], node[1] + 1)] \
- if 0 <= xx[0] < mesh.graph['H'] and 0 <= xx[1] < mesh.graph['W'] and \
- near_depth_map[xx[0], xx[1]] != 0 and \
- abs(near_depth_map[xx[0], xx[1]]) < abs(node[2])]
- if len(four_nes) > 0:
- filtered_comp_far_cc.add(node)
- for ne in four_nes:
- filtered_accomp_near_cc.add((ne[0], ne[1], -abs(raw_near_depth_map[ne[0], ne[1]])))
- cur_comp_far_cc, cur_accomp_near_cc = filtered_comp_far_cc, filtered_accomp_near_cc
- mask_ccs[edge_id] |= set(cur_mask_cc)
- context_ccs[edge_id] |= set(cur_context_cc)
- accomp_extend_context_ccs[edge_id] |= set(cur_accomp_near_cc).intersection(cur_mask_cc)
- extend_edge_ccs[edge_id] |= set(cur_accomp_near_cc).intersection(cur_mask_cc)
- extend_context_ccs[edge_id] |= set(cur_comp_far_cc)
- invalid_extend_edge_ccs[edge_id] |= set(cur_invalid_extend_edge_cc)
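- # Decide how many context rings to erode: shrink tmp_width until the eroded rings cover at most
- # roughly 30% of the grown context, then move those rings from context_ccs into erode_context_ccs.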
- erode_size = [0]
- for tmp in tmp_erode:
- erode_size.append(len(tmp))
- if len(erode_size) > 1:
- erode_size[-1] += erode_size[-2]
- if inpaint_iter == 0:
- tmp_width = config['depth_edge_dilate']
- else:
- tmp_width = 0
- while float(erode_size[tmp_width]) / (erode_size[-1] + 1e-6) > 0.3:
- tmp_width = tmp_width - 1
- try:
- if tmp_width == 0:
- erode_context_ccs[edge_id] = set([])
- else:
- erode_context_ccs[edge_id] = set(reduce(lambda x, y : x + y, [] + tmp_erode[:tmp_width]))
- except:
- import pdb; pdb.set_trace()
- erode_context_cc = copy.deepcopy(erode_context_ccs[edge_id])
- for erode_context_node in erode_context_cc:
- if (inpaint_iter != 0 and (mesh_nodes[erode_context_node].get('inpaint_id') is None or
- mesh_nodes[erode_context_node].get('inpaint_id') == 0)):
- erode_context_ccs[edge_id].remove(erode_context_node)
- else:
- context_ccs[edge_id].remove(erode_context_node)
- context_map = np.zeros((mesh.graph['H'], mesh.graph['W']))
- for context_node in context_ccs[edge_id]:
- context_map[context_node[0], context_node[1]] = 1
- extend_context_ccs[edge_id] = extend_context_ccs[edge_id] - mask_ccs[edge_id] - accomp_extend_context_ccs[edge_id]
- if inpaint_iter == 0:
- all_ecnt_cc = set()
- for ecnt_id, ecnt_cc in enumerate(extend_context_ccs):
- constraint_context_ids = set()
- constraint_context_cc = set()
- constraint_erode_context_cc = set()
- tmp_mask_cc = set()
- accum_context_cc = None; accum_context_cc = []
- for ecnt_node in accomp_extend_context_ccs[ecnt_id]:
- if edge_maps[ecnt_node[0], ecnt_node[1]] > -1:
- constraint_context_ids.add(int(round(edge_maps[ecnt_node[0], ecnt_node[1]])))
- constraint_erode_context_cc = erode_context_ccs[ecnt_id]
- for constraint_context_id in constraint_context_ids:
- constraint_context_cc = constraint_context_cc | context_ccs[constraint_context_id] | erode_context_ccs[constraint_context_id]
- constraint_erode_context_cc = constraint_erode_context_cc | erode_context_ccs[constraint_context_id]
- for i in range(background_thickness):
- if i == 0:
- tmp_context_nodes = copy.deepcopy(ecnt_cc)
- tmp_invalid_context_nodes = copy.deepcopy(invalid_extend_edge_ccs[ecnt_id])
- tmp_mask_nodes = copy.deepcopy(accomp_extend_context_ccs[ecnt_id])
- tmp_context_map = np.zeros((mesh.graph['H'], mesh.graph['W'])).astype(bool)
- tmp_mask_map = np.zeros((mesh.graph['H'], mesh.graph['W'])).astype(bool)
- tmp_invalid_context_map = np.zeros((mesh.graph['H'], mesh.graph['W'])).astype(bool)
- for node in tmp_mask_nodes:
- tmp_mask_map[node[0], node[1]] = True
- for node in context_ccs[ecnt_id]:
- tmp_context_map[node[0], node[1]] = True
- for node in erode_context_ccs[ecnt_id]:
- tmp_context_map[node[0], node[1]] = True
- for node in extend_context_ccs[ecnt_id]:
- tmp_context_map[node[0], node[1]] = True
- for node in invalid_extend_edge_ccs[ecnt_id]:
- tmp_invalid_context_map[node[0], node[1]] = True
- init_invalid_context_map = tmp_invalid_context_map.copy()
- init_context_map = tmp_context_map.copy()
- if (tmp_mask_map.astype(np.uint8) * tmp_context_map.astype(np.uint8)).max() > 0:
- import pdb; pdb.set_trace()
- if vis_edge_id is not None and ecnt_id == vis_edge_id:
- f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=True, sharey=True)
- ax1.imshow(tmp_context_map * 1); ax2.imshow(init_invalid_context_map * 1 + tmp_context_map * 2)
- plt.show()
- import pdb; pdb.set_trace()
- else:
- tmp_context_nodes = new_tmp_context_nodes
- new_tmp_context_nodes = None
- tmp_mask_nodes = new_tmp_mask_nodes
- new_tmp_mask_nodes = None
- tmp_invalid_context_nodes = new_tmp_invalid_context_nodes
- new_tmp_invalid_context_nodes = None
- new_tmp_context_nodes = None
- new_tmp_context_nodes = []
- new_tmp_invalid_context_nodes = None
- new_tmp_invalid_context_nodes = []
- new_tmp_mask_nodes = set([])
- for node in tmp_context_nodes:
- for ne in mesh.neighbors(node):
- if ne in constraint_context_cc and \
- bool(tmp_mask_map[ne[0], ne[1]]) is False and \
- bool(tmp_context_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True:
- new_tmp_context_nodes.append(ne)
- tmp_context_map[ne[0], ne[1]] = True
- accum_context_cc.extend(new_tmp_context_nodes)
- for node in tmp_invalid_context_nodes:
- for ne in mesh.neighbors(node):
- if bool(tmp_mask_map[ne[0], ne[1]]) is False and \
- bool(tmp_context_map[ne[0], ne[1]]) is False and \
- bool(tmp_invalid_context_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True:
- tmp_invalid_context_map[ne[0], ne[1]] = True
- new_tmp_invalid_context_nodes.append(ne)
- for node in tmp_mask_nodes:
- for ne in mesh.neighbors(node):
- if bool(tmp_mask_map[ne[0], ne[1]]) is False and \
- bool(tmp_context_map[ne[0], ne[1]]) is False and \
- bool(tmp_invalid_context_map[ne[0], ne[1]]) is False and \
- bool(forbidden_map[ne[0], ne[1]]) is True:
- new_tmp_mask_nodes.add(ne)
- tmp_mask_map[ne[0], ne[1]] = True
- init_invalid_context_map[tmp_context_map] = False
- _, tmp_label_map = cv2.connectedComponents((init_invalid_context_map | tmp_context_map).astype(np.uint8), connectivity=8)
- tmp_label_ids = set(np.unique(tmp_label_map[init_invalid_context_map]))
- if (tmp_mask_map.astype(np.uint8) * tmp_context_map.astype(np.uint8)).max() > 0:
- import pdb; pdb.set_trace()
- if vis_edge_id is not None and ecnt_id == vis_edge_id:
- f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=True, sharey=True)
- ax1.imshow(tmp_label_map); ax2.imshow(init_invalid_context_map * 1 + tmp_context_map * 2)
- plt.show()
- import pdb; pdb.set_trace()
- extend_context_ccs[ecnt_id] |= set(accum_context_cc)
- extend_context_ccs[ecnt_id] = extend_context_ccs[ecnt_id] - mask_ccs[ecnt_id]
- extend_erode_context_ccs[ecnt_id] = extend_context_ccs[ecnt_id] & constraint_erode_context_cc
- extend_context_ccs[ecnt_id] = extend_context_ccs[ecnt_id] - extend_erode_context_ccs[ecnt_id] - erode_context_ccs[ecnt_id]
- tmp_context_cc = context_ccs[ecnt_id] - extend_erode_context_ccs[ecnt_id] - erode_context_ccs[ecnt_id]
- if len(tmp_context_cc) > 0:
- context_ccs[ecnt_id] = tmp_context_cc
- tmp_mask_cc = tmp_mask_cc - context_ccs[ecnt_id] - erode_context_ccs[ecnt_id]
- mask_ccs[ecnt_id] = mask_ccs[ecnt_id] | tmp_mask_cc
-
- return context_ccs, mask_ccs, broken_mask_ccs, edge_ccs, erode_context_ccs, invalid_extend_edge_ccs, edge_maps, extend_context_ccs, extend_edge_ccs, extend_erode_context_ccs
-
-def DL_inpaint_edge(mesh,
- info_on_pix,
- config,
- image,
- depth,
- context_ccs,
- erode_context_ccs,
- extend_context_ccs,
- extend_erode_context_ccs,
- mask_ccs,
- broken_mask_ccs,
- edge_ccs,
- extend_edge_ccs,
- init_mask_connect,
- edge_maps,
- rgb_model=None,
- depth_edge_model=None,
- depth_edge_model_init=None,
- depth_feat_model=None,
- specific_edge_id=-1,
- specific_edge_loc=None,
- inpaint_iter=0):
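- # Per edge component: (1) complete the depth edge inside the hole with depth_edge_model and
- # trace its near/far paths, (2) inpaint depth with depth_feat_model and refine it around the
- # completed edges, (3) inpaint colour with rgb_model, then write the synthesized pixels back
- # into the mesh and info_on_pix.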
-
- if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
- device = config["gpu_ids"]
- else:
- device = "cpu"
-
- edge_map = np.zeros_like(depth)
- new_edge_ccs = [set() for _ in range(len(edge_ccs))]
- edge_maps_with_id = edge_maps
- edge_condition = lambda x, m: m.nodes[x].get('far') is not None and len(m.nodes[x].get('far')) > 0
- edge_map = get_map_from_ccs(edge_ccs, mesh.graph['H'], mesh.graph['W'], mesh, edge_condition)
- np_depth, np_image = depth.copy(), image.copy()
- image_c = image.shape[-1]
- image = torch.FloatTensor(image.transpose(2, 0, 1)).unsqueeze(0).to(device)
- if depth.ndim < 3:
- depth = depth[..., None]
- depth = torch.FloatTensor(depth.transpose(2, 0, 1)).unsqueeze(0).to(device)
- mesh.graph['max_edge_id'] = len(edge_ccs)
- connect_points_ccs = [set() for _ in range(len(edge_ccs))]
- gp_time, tmp_mesh_time, bilateral_time = 0, 0, 0
- edges_infos = dict()
- edges_in_mask = [set() for _ in range(len(edge_ccs))]
- tmp_specific_edge_id = []
- for edge_id, (context_cc, mask_cc, erode_context_cc, extend_context_cc, edge_cc) in enumerate(zip(context_ccs, mask_ccs, erode_context_ccs, extend_context_ccs, edge_ccs)):
- if len(specific_edge_id) > 0:
- if edge_id not in specific_edge_id:
- continue
- if len(context_cc) < 1 or len(mask_cc) < 1:
- continue
- edge_dict = get_edge_from_nodes(context_cc | extend_context_cc, erode_context_cc | extend_erode_context_ccs[edge_id], mask_cc, edge_cc, extend_edge_ccs[edge_id],
- mesh.graph['H'], mesh.graph['W'], mesh)
- edge_dict['edge'], end_depth_maps, _ = \
- filter_irrelevant_edge_new(edge_dict['self_edge'], edge_dict['comp_edge'],
- edge_map,
- edge_maps_with_id,
- edge_id,
- edge_dict['context'],
- edge_dict['depth'], mesh, context_cc | erode_context_cc | extend_context_cc | extend_erode_context_ccs[edge_id], spdb=False)
- if specific_edge_loc is not None and \
- edge_dict['mask'][specific_edge_loc[0], specific_edge_loc[1]] == 0:
- continue
- mask_size = get_valid_size(edge_dict['mask'])
- mask_size = dilate_valid_size(mask_size, edge_dict['mask'], dilate=[20, 20])
- context_size = get_valid_size(edge_dict['context'])
- context_size = dilate_valid_size(context_size, edge_dict['context'], dilate=[20, 20])
- union_size = size_operation(mask_size, context_size, operation='+')
- patch_edge_dict = dict()
- patch_edge_dict['mask'], patch_edge_dict['context'], patch_edge_dict['rgb'], \
- patch_edge_dict['disp'], patch_edge_dict['edge'] = \
- crop_maps_by_size(union_size, edge_dict['mask'], edge_dict['context'],
- edge_dict['rgb'], edge_dict['disp'], edge_dict['edge'])
- x_anchor, y_anchor = [union_size['x_min'], union_size['x_max']], [union_size['y_min'], union_size['y_max']]
- tensor_edge_dict = convert2tensor(patch_edge_dict)
- input_edge_feat = torch.cat((tensor_edge_dict['rgb'],
- tensor_edge_dict['disp'],
- tensor_edge_dict['edge'],
- 1 - tensor_edge_dict['context'],
- tensor_edge_dict['mask']), dim=1)
- if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) and inpaint_iter == 0:
- with torch.no_grad():
- depth_edge_output = depth_edge_model.forward_3P(tensor_edge_dict['mask'],
- tensor_edge_dict['context'],
- tensor_edge_dict['rgb'],
- tensor_edge_dict['disp'],
- tensor_edge_dict['edge'],
- unit_length=128,
- cuda=device)
- depth_edge_output = depth_edge_output.cpu()
- tensor_edge_dict['output'] = (depth_edge_output> config['ext_edge_threshold']).float() * tensor_edge_dict['mask'] + tensor_edge_dict['edge']
- else:
- tensor_edge_dict['output'] = tensor_edge_dict['edge']
- depth_edge_output = tensor_edge_dict['edge'] + 0
- patch_edge_dict['output'] = tensor_edge_dict['output'].squeeze().data.cpu().numpy()
- edge_dict['output'] = np.zeros((mesh.graph['H'], mesh.graph['W']))
- edge_dict['output'][union_size['x_min']:union_size['x_max'], union_size['y_min']:union_size['y_max']] = \
- patch_edge_dict['output']
- if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) and inpaint_iter == 0:
- if ((depth_edge_output> config['ext_edge_threshold']).float() * tensor_edge_dict['mask']).max() > 0:
- try:
- edge_dict['fpath_map'], edge_dict['npath_map'], break_flag, npaths, fpaths, invalid_edge_id = \
- clean_far_edge_new(edge_dict['output'], end_depth_maps, edge_dict['mask'], edge_dict['context'], mesh, info_on_pix, edge_dict['self_edge'], inpaint_iter, config)
- except:
- import pdb; pdb.set_trace()
- pre_npath_map = edge_dict['npath_map'].copy()
- if config.get('repeat_inpaint_edge') is True:
- for _ in range(2):
- tmp_input_edge = ((edge_dict['npath_map'] > -1) + edge_dict['edge']).clip(0, 1)
- patch_tmp_input_edge = crop_maps_by_size(union_size, tmp_input_edge)[0]
- tensor_input_edge = torch.FloatTensor(patch_tmp_input_edge)[None, None, ...]
- depth_edge_output = depth_edge_model.forward_3P(tensor_edge_dict['mask'],
- tensor_edge_dict['context'],
- tensor_edge_dict['rgb'],
- tensor_edge_dict['disp'],
- tensor_input_edge,
- unit_length=128,
- cuda=device)
- depth_edge_output = depth_edge_output.cpu()
- depth_edge_output = (depth_edge_output> config['ext_edge_threshold']).float() * tensor_edge_dict['mask'] + tensor_edge_dict['edge']
- depth_edge_output = depth_edge_output.squeeze().data.cpu().numpy()
- full_depth_edge_output = np.zeros((mesh.graph['H'], mesh.graph['W']))
- full_depth_edge_output[union_size['x_min']:union_size['x_max'], union_size['y_min']:union_size['y_max']] = \
- depth_edge_output
- edge_dict['fpath_map'], edge_dict['npath_map'], break_flag, npaths, fpaths, invalid_edge_id = \
- clean_far_edge_new(full_depth_edge_output, end_depth_maps, edge_dict['mask'], edge_dict['context'], mesh, info_on_pix, edge_dict['self_edge'], inpaint_iter, config)
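- # Index every traced path by its anchored endpoint(s) in edges_infos so that the second loop
- # below can match paths which meet the same endpoint from different masks.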
- for nid in npaths.keys():
- npath, fpath = npaths[nid], fpaths[nid]
- start_mx, start_my, end_mx, end_my = -1, -1, -1, -1
- if end_depth_maps[npath[0][0], npath[0][1]] != 0:
- start_mx, start_my = npath[0][0], npath[0][1]
- if end_depth_maps[npath[-1][0], npath[-1][1]] != 0:
- end_mx, end_my = npath[-1][0], npath[-1][1]
- if start_mx == -1:
- import pdb; pdb.set_trace()
- valid_end_pt = () if end_mx == -1 else (end_mx, end_my, info_on_pix[(end_mx, end_my)][0]['depth'])
- new_edge_info = dict(fpath=fpath,
- npath=npath,
- cont_end_pts=valid_end_pt,
- mask_id=edge_id,
- comp_edge_id=nid,
- depth=end_depth_maps[start_mx, start_my])
- if edges_infos.get((start_mx, start_my)) is None:
- edges_infos[(start_mx, start_my)] = []
- edges_infos[(start_mx, start_my)].append(new_edge_info)
- edges_in_mask[edge_id].add((start_mx, start_my))
- if len(valid_end_pt) > 0:
- new_edge_info = dict(fpath=fpath[::-1],
- npath=npath[::-1],
- cont_end_pts=(start_mx, start_my, info_on_pix[(start_mx, start_my)][0]['depth']),
- mask_id=edge_id,
- comp_edge_id=nid,
- depth=end_depth_maps[end_mx, end_my])
- if edges_infos.get((end_mx, end_my)) is None:
- edges_infos[(end_mx, end_my)] = []
- edges_infos[(end_mx, end_my)].append(new_edge_info)
- edges_in_mask[edge_id].add((end_mx, end_my))
- for edge_id, (context_cc, mask_cc, erode_context_cc, extend_context_cc, edge_cc) in enumerate(zip(context_ccs, mask_ccs, erode_context_ccs, extend_context_ccs, edge_ccs)):
- if len(specific_edge_id) > 0:
- if edge_id not in specific_edge_id:
- continue
- if len(context_cc) < 1 or len(mask_cc) < 1:
- continue
- edge_dict = get_edge_from_nodes(context_cc | extend_context_cc, erode_context_cc | extend_erode_context_ccs[edge_id], mask_cc, edge_cc, extend_edge_ccs[edge_id],
- mesh.graph['H'], mesh.graph['W'], mesh)
- if specific_edge_loc is not None and \
- edge_dict['mask'][specific_edge_loc[0], specific_edge_loc[1]] == 0:
- continue
- else:
- tmp_specific_edge_id.append(edge_id)
- edge_dict['edge'], end_depth_maps, _ = \
- filter_irrelevant_edge_new(edge_dict['self_edge'], edge_dict['comp_edge'],
- edge_map,
- edge_maps_with_id,
- edge_id,
- edge_dict['context'],
- edge_dict['depth'], mesh, context_cc | erode_context_cc | extend_context_cc | extend_erode_context_ccs[edge_id], spdb=False)
- discard_map = np.zeros_like(edge_dict['edge'])
- mask_size = get_valid_size(edge_dict['mask'])
- mask_size = dilate_valid_size(mask_size, edge_dict['mask'], dilate=[20, 20])
- context_size = get_valid_size(edge_dict['context'])
- context_size = dilate_valid_size(context_size, edge_dict['context'], dilate=[20, 20])
- union_size = size_operation(mask_size, context_size, operation='+')
- patch_edge_dict = dict()
- patch_edge_dict['mask'], patch_edge_dict['context'], patch_edge_dict['rgb'], \
- patch_edge_dict['disp'], patch_edge_dict['edge'] = \
- crop_maps_by_size(union_size, edge_dict['mask'], edge_dict['context'],
- edge_dict['rgb'], edge_dict['disp'], edge_dict['edge'])
- x_anchor, y_anchor = [union_size['x_min'], union_size['x_max']], [union_size['y_min'], union_size['y_max']]
- tensor_edge_dict = convert2tensor(patch_edge_dict)
- input_edge_feat = torch.cat((tensor_edge_dict['rgb'],
- tensor_edge_dict['disp'],
- tensor_edge_dict['edge'],
- 1 - tensor_edge_dict['context'],
- tensor_edge_dict['mask']), dim=1)
- edge_dict['output'] = edge_dict['edge'].copy()
-
- if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) and inpaint_iter == 0:
- edge_dict['fpath_map'], edge_dict['npath_map'] = edge_dict['fpath_map'] * 0 - 1, edge_dict['npath_map'] * 0 - 1
- end_pts = edges_in_mask[edge_id]
- for end_pt in end_pts:
- cur_edge_infos = edges_infos[(end_pt[0], end_pt[1])]
- cur_info = [xx for xx in cur_edge_infos if xx['mask_id'] == edge_id][0]
- other_infos = [xx for xx in cur_edge_infos if xx['mask_id'] != edge_id and len(xx['cont_end_pts']) > 0]
- if len(cur_info['cont_end_pts']) > 0 or (len(cur_info['cont_end_pts']) == 0 and len(other_infos) == 0):
- for fnode in cur_info['fpath']:
- edge_dict['fpath_map'][fnode[0], fnode[1]] = cur_info['comp_edge_id']
- for fnode in cur_info['npath']:
- edge_dict['npath_map'][fnode[0], fnode[1]] = cur_info['comp_edge_id']
- fnmap = edge_dict['fpath_map'] * 1
- fnmap[edge_dict['npath_map'] != -1] = edge_dict['npath_map'][edge_dict['npath_map'] != -1]
- for end_pt in end_pts:
- cur_edge_infos = edges_infos[(end_pt[0], end_pt[1])]
- cur_info = [xx for xx in cur_edge_infos if xx['mask_id'] == edge_id][0]
- cur_depth = cur_info['depth']
- other_infos = [xx for xx in cur_edge_infos if xx['mask_id'] != edge_id and len(xx['cont_end_pts']) > 0]
- comp_edge_id = cur_info['comp_edge_id']
- if len(cur_info['cont_end_pts']) == 0 and len(other_infos) > 0:
- other_infos = sorted(other_infos, key=lambda aa: abs(abs(aa['cont_end_pts'][2]) - abs(cur_depth)))
- for other_info in other_infos:
- tmp_fmap, tmp_nmap = np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1, np.zeros((mesh.graph['H'], mesh.graph['W'])) - 1
- for fnode in other_info['fpath']:
- if fnmap[fnode[0], fnode[1]] != -1:
- tmp_fmap = tmp_fmap * 0 - 1
- break
- else:
- tmp_fmap[fnode[0], fnode[1]] = comp_edge_id
- if fnmap[fnode[0], fnode[1]] != -1:
- continue
- for fnode in other_info['npath']:
- if fnmap[fnode[0], fnode[1]] != -1:
- tmp_nmap = tmp_nmap * 0 - 1
- break
- else:
- tmp_nmap[fnode[0], fnode[1]] = comp_edge_id
- if fnmap[fnode[0], fnode[1]] != -1:
- continue
- break
- if min(tmp_fmap.max(), tmp_nmap.max()) != -1:
- edge_dict['fpath_map'] = tmp_fmap
- edge_dict['fpath_map'][edge_dict['valid_area'] == 0] = -1
- edge_dict['npath_map'] = tmp_nmap
- edge_dict['npath_map'][edge_dict['valid_area'] == 0] = -1
- discard_map = ((tmp_nmap != -1).astype(np.uint8) + (tmp_fmap != -1).astype(np.uint8)) * edge_dict['mask']
- else:
- for fnode in cur_info['fpath']:
- edge_dict['fpath_map'][fnode[0], fnode[1]] = cur_info['comp_edge_id']
- for fnode in cur_info['npath']:
- edge_dict['npath_map'][fnode[0], fnode[1]] = cur_info['comp_edge_id']
- if edge_dict['npath_map'].min() == 0 or edge_dict['fpath_map'].min() == 0:
- import pdb; pdb.set_trace()
- edge_dict['output'] = (edge_dict['npath_map'] > -1) * edge_dict['mask'] + edge_dict['context'] * edge_dict['edge']
- mesh, _, _, _ = create_placeholder(edge_dict['context'], edge_dict['mask'],
- edge_dict['depth'], edge_dict['fpath_map'],
- edge_dict['npath_map'], mesh, inpaint_iter,
- edge_ccs,
- extend_edge_ccs[edge_id],
- edge_maps_with_id,
- edge_id)
-
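- # Pixels whose paths were borrowed from another edge (discard_map) are flagged so they do not
- # seed the second inpainting pass.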
- dxs, dys = np.where(discard_map != 0)
- for dx, dy in zip(dxs, dys):
- mesh.nodes[(dx, dy)]['inpaint_twice'] = False
- depth_dict = depth_inpainting(context_cc, extend_context_cc, erode_context_cc | extend_erode_context_ccs[edge_id], mask_cc, mesh, config, union_size, depth_feat_model, edge_dict['output'])
- refine_depth_output = depth_dict['output']*depth_dict['mask']
- for near_id in np.unique(edge_dict['npath_map'])[1:]:
- refine_depth_output = refine_depth_around_edge(refine_depth_output.copy(),
- (edge_dict['fpath_map'] == near_id).astype(np.uint8) * edge_dict['mask'],
- (edge_dict['fpath_map'] == near_id).astype(np.uint8),
- (edge_dict['npath_map'] == near_id).astype(np.uint8) * edge_dict['mask'],
- depth_dict['mask'].copy(),
- depth_dict['output'] * depth_dict['context'],
- config)
- depth_dict['output'][depth_dict['mask'] > 0] = refine_depth_output[depth_dict['mask'] > 0]
- rgb_dict = get_rgb_from_nodes(context_cc | extend_context_cc,
- erode_context_cc | extend_erode_context_ccs[edge_id], mask_cc, mesh.graph['H'], mesh.graph['W'], mesh)
- if not np.all(rgb_dict['mask'] == edge_dict['mask']):
- import pdb; pdb.set_trace()
- rgb_dict['edge'] = edge_dict['output']
- patch_rgb_dict = dict()
- patch_rgb_dict['mask'], patch_rgb_dict['context'], patch_rgb_dict['rgb'], \
- patch_rgb_dict['edge'] = crop_maps_by_size(union_size, rgb_dict['mask'],
- rgb_dict['context'], rgb_dict['rgb'],
- rgb_dict['edge'])
- tensor_rgb_dict = convert2tensor(patch_rgb_dict)
- resize_rgb_dict = {k: v.clone() for k, v in tensor_rgb_dict.items()}
- max_hw = np.array([*patch_rgb_dict['mask'].shape[-2:]]).max()
- init_frac = config['largest_size'] / (np.array([*patch_rgb_dict['mask'].shape[-2:]]).prod() ** 0.5)
- resize_hw = [patch_rgb_dict['mask'].shape[-2] * init_frac, patch_rgb_dict['mask'].shape[-1] * init_frac]
- resize_max_hw = max(resize_hw)
- frac = (np.floor(resize_max_hw / 128.) * 128.) / max_hw
- if frac < 1:
- resize_mark = torch.nn.functional.interpolate(torch.cat((resize_rgb_dict['mask'],
- resize_rgb_dict['context']),
- dim=1),
- scale_factor=frac,
- mode='area')
- resize_rgb_dict['mask'] = (resize_mark[:, 0:1] > 0).float()
- resize_rgb_dict['context'] = (resize_mark[:, 1:2] == 1).float()
- resize_rgb_dict['context'][resize_rgb_dict['mask'] > 0] = 0
- resize_rgb_dict['rgb'] = torch.nn.functional.interpolate(resize_rgb_dict['rgb'],
- scale_factor=frac,
- mode='area')
- resize_rgb_dict['rgb'] = resize_rgb_dict['rgb'] * resize_rgb_dict['context']
- resize_rgb_dict['edge'] = torch.nn.functional.interpolate(resize_rgb_dict['edge'],
- scale_factor=frac,
- mode='area')
- resize_rgb_dict['edge'] = (resize_rgb_dict['edge'] > 0).float() * 0
- resize_rgb_dict['edge'] = resize_rgb_dict['edge'] * (resize_rgb_dict['context'] + resize_rgb_dict['mask'])
- rgb_input_feat = torch.cat((resize_rgb_dict['rgb'], resize_rgb_dict['edge']), dim=1)
- rgb_input_feat[:, 3] = 1 - rgb_input_feat[:, 3]
- resize_mask = open_small_mask(resize_rgb_dict['mask'], resize_rgb_dict['context'], 3, 41)
- specified_hole = resize_mask
- with torch.no_grad():
- rgb_output = rgb_model.forward_3P(specified_hole,
- resize_rgb_dict['context'],
- resize_rgb_dict['rgb'],
- resize_rgb_dict['edge'],
- unit_length=128,
- cuda=device)
- rgb_output = rgb_output.cpu()
- if config.get('gray_image') is True:
- rgb_output = rgb_output.mean(1, keepdim=True).repeat((1,3,1,1))
- rgb_output = rgb_output.cpu()
- resize_rgb_dict['output'] = rgb_output * resize_rgb_dict['mask'] + resize_rgb_dict['rgb']
- tensor_rgb_dict['output'] = resize_rgb_dict['output']
- if frac < 1:
- tensor_rgb_dict['output'] = torch.nn.functional.interpolate(tensor_rgb_dict['output'],
- size=tensor_rgb_dict['mask'].shape[-2:],
- mode='bicubic')
- tensor_rgb_dict['output'] = tensor_rgb_dict['output'] * \
- tensor_rgb_dict['mask'] + (tensor_rgb_dict['rgb'] * tensor_rgb_dict['context'])
- patch_rgb_dict['output'] = tensor_rgb_dict['output'].data.cpu().numpy().squeeze().transpose(1,2,0)
- rgb_dict['output'] = np.zeros((mesh.graph['H'], mesh.graph['W'], 3))
- rgb_dict['output'][union_size['x_min']:union_size['x_max'], union_size['y_min']:union_size['y_max']] = \
- patch_rgb_dict['output']
-
- if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) or inpaint_iter > 0:
- edge_occlusion = True
- else:
- edge_occlusion = False
- for node in erode_context_cc:
- if rgb_dict['mask'][node[0], node[1]] > 0:
- for info in info_on_pix[(node[0], node[1])]:
- if abs(info['depth']) == abs(node[2]):
- info['update_color'] = (rgb_dict['output'][node[0], node[1]] * 255).astype(np.uint8)
- if frac < 1.:
- depth_edge_dilate_2_color_flag = False
- else:
- depth_edge_dilate_2_color_flag = True
- hxs, hys = np.where((rgb_dict['mask'] > 0) & (rgb_dict['erode'] == 0))
- for hx, hy in zip(hxs, hys):
- real_depth = None
- if abs(depth_dict['output'][hx, hy]) <= abs(np_depth[hx, hy]):
- depth_dict['output'][hx, hy] = np_depth[hx, hy] + 0.01
- node = (hx, hy, -depth_dict['output'][hx, hy])
- if info_on_pix.get((node[0], node[1])) is not None:
- for info in info_on_pix.get((node[0], node[1])):
- if info.get('inpaint_id') is None or abs(info['inpaint_id']) < mesh.nodes[(hx, hy)]['inpaint_id']:
- pre_depth = info['depth'] if info.get('real_depth') is None else info['real_depth']
- if abs(node[2]) < abs(pre_depth):
- node = (node[0], node[1], -(abs(pre_depth) + 0.001))
- if mesh.has_node(node):
- real_depth = node[2]
- while True:
- if mesh.has_node(node):
- node = (node[0], node[1], -(abs(node[2]) + 0.001))
- else:
- break
- if real_depth == node[2]:
- real_depth = None
- cur_disp = 1./node[2]
- if not(mesh.has_node(node)):
- if not mesh.has_node((node[0], node[1])):
- print("2D node not found.")
- import pdb; pdb.set_trace()
- paint = (rgb_dict['output'][hx, hy] * 255).astype(np.uint8)  # same paint value for both inpaint iterations
- ndict = dict(color=paint,
- synthesis=True,
- disp=cur_disp,
- cc_id=set([edge_id]),
- overlap_number=1.0,
- refine_depth=False,
- edge_occlusion=edge_occlusion,
- depth_edge_dilate_2_color_flag=depth_edge_dilate_2_color_flag,
- real_depth=real_depth)
- mesh, _, _ = refresh_node((node[0], node[1]), mesh.nodes[(node[0], node[1])], node, ndict, mesh, stime=True)
- if inpaint_iter == 0 and mesh.degree(node) < 4:
- connect_points_ccs[edge_id].add(node)
- if info_on_pix.get((hx, hy)) is None:
- info_on_pix[(hx, hy)] = []
- new_info = {'depth':node[2],
- 'color': paint,
- 'synthesis':True,
- 'disp':cur_disp,
- 'cc_id':set([edge_id]),
- 'inpaint_id':inpaint_iter + 1,
- 'edge_occlusion':edge_occlusion,
- 'overlap_number':1.0,
- 'real_depth': real_depth}
- info_on_pix[(hx, hy)].append(new_info)
- specific_edge_id = tmp_specific_edge_id
- for erode_id, erode_context_cc in enumerate(erode_context_ccs):
- if len(specific_edge_id) > 0 and erode_id not in specific_edge_id:
- continue
- for erode_node in erode_context_cc:
- for info in info_on_pix[(erode_node[0], erode_node[1])]:
- if info['depth'] == erode_node[2]:
- info['color'] = info['update_color']
- mesh.nodes[erode_node]['color'] = info['update_color']
- np_image[(erode_node[0], erode_node[1])] = info['update_color']
- new_edge_ccs = [set() for _ in range(mesh.graph['max_edge_id'] + 1)]
- for node in list(mesh.nodes):  # snapshot the node list; 2-D placeholder nodes are removed inside the loop
- if len(node) == 2:
- mesh.remove_node(node)
- continue
- if mesh.nodes[node].get('edge_id') is not None and mesh.nodes[node].get('inpaint_id') == inpaint_iter + 1:
- if mesh.nodes[node].get('inpaint_twice') is False:
- continue
- try:
- new_edge_ccs[mesh.nodes[node].get('edge_id')].add(node)
- except:
- import pdb; pdb.set_trace()
- specific_mask_nodes = None
- if inpaint_iter == 0:
- mesh, info_on_pix = refine_color_around_edge(mesh, info_on_pix, new_edge_ccs, config, False)
-
- return mesh, info_on_pix, specific_mask_nodes, new_edge_ccs, connect_points_ccs, np_image
-
-
-def write_ply(image,
- depth,
- int_mtx,
- ply_name,
- config,
- rgb_model,
- depth_edge_model,
- depth_edge_model_init,
- depth_feat_model):
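- # Pipeline: build the layered mesh from depth + image, tear and regroup depth edges, clean
- # dangling / redundant edges, optionally extrapolate the border, run two rounds of
- # context_and_holes + DL_inpaint_edge, and finally emit an ASCII PLY or the raw arrays.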
- depth = depth.astype(np.float64)
- input_mesh, xy2depth, image, depth = create_mesh(depth, image, int_mtx, config)
-
- H, W = input_mesh.graph['H'], input_mesh.graph['W']
- input_mesh = tear_edges(input_mesh, config['depth_threshold'], xy2depth)
- input_mesh, info_on_pix = generate_init_node(input_mesh, config, min_node_in_cc=200)
- edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=False)
- edge_canvas = np.zeros((H, W)) - 1
-
- input_mesh, info_on_pix, depth = reassign_floating_island(input_mesh, info_on_pix, image, depth)
- input_mesh = update_status(input_mesh, info_on_pix)
- specific_edge_id = []
- edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True)
- pre_depth = depth.copy()
- input_mesh, info_on_pix, edge_mesh, depth, aft_mark = remove_dangling(input_mesh, edge_ccs, edge_mesh, info_on_pix, image, depth, config)
-
- input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth)
- edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True)
- edge_canvas = np.zeros((H, W)) - 1
-
- mesh, info_on_pix, depth = fill_missing_node(input_mesh, info_on_pix, image, depth)
- if config['extrapolate_border'] is True:
- pre_depth = depth.copy()
- input_mesh, info_on_pix, depth = refresh_bord_depth(input_mesh, info_on_pix, image, depth)
- input_mesh = remove_node_feat(input_mesh, 'edge_id')
- aft_depth = depth.copy()
- input_mesh, info_on_pix, depth, image = enlarge_border(input_mesh, info_on_pix, depth, image, config)
- noext_H, noext_W = H, W
- H, W = image.shape[:2]
- input_mesh, info_on_pix = fill_dummy_bord(input_mesh, info_on_pix, image, depth, config)
- edge_ccs, input_mesh, edge_mesh = \
- group_edges(input_mesh, config, image, remove_conflict_ordinal=True)
- input_mesh = combine_end_node(input_mesh, edge_mesh, edge_ccs, depth)
- input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth)
- edge_ccs, input_mesh, edge_mesh = \
- group_edges(input_mesh, config, image, remove_conflict_ordinal=True, spdb=False)
- input_mesh = remove_redundant_edge(input_mesh, edge_mesh, edge_ccs, info_on_pix, config, redundant_number=config['redundant_number'], spdb=False)
- input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth)
- edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True)
- input_mesh = combine_end_node(input_mesh, edge_mesh, edge_ccs, depth)
- input_mesh = remove_redundant_edge(input_mesh, edge_mesh, edge_ccs, info_on_pix, config, redundant_number=config['redundant_number'], invalid=True, spdb=False)
- input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth)
- edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True)
- input_mesh = combine_end_node(input_mesh, edge_mesh, edge_ccs, depth)
- input_mesh, depth, info_on_pix = update_status(input_mesh, info_on_pix, depth)
- edge_ccs, input_mesh, edge_mesh = group_edges(input_mesh, config, image, remove_conflict_ordinal=True)
- edge_condition = lambda x, m: m.nodes[x].get('far') is not None and len(m.nodes[x].get('far')) > 0
- edge_map = get_map_from_ccs(edge_ccs, input_mesh.graph['H'], input_mesh.graph['W'], input_mesh, edge_condition)
- other_edge_with_id = get_map_from_ccs(edge_ccs, input_mesh.graph['H'], input_mesh.graph['W'], real_id=True)
- info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs,
- depth_edge_model, depth_feat_model, rgb_model, config, direc="up")
- info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs,
- depth_edge_model, depth_feat_model, rgb_model, config, direc="left")
- info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs,
- depth_edge_model, depth_feat_model, rgb_model, config, direc="down")
- info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs,
- depth_edge_model, depth_feat_model, rgb_model, config, direc="right")
- info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs,
- depth_edge_model, depth_feat_model, rgb_model, config, direc="right-up")
- info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs,
- depth_edge_model, depth_feat_model, rgb_model, config, direc="right-down")
- info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs,
- depth_edge_model, depth_feat_model, rgb_model, config, direc="left-up")
- info_on_pix, input_mesh, image, depth, edge_ccs = extrapolate(input_mesh, info_on_pix, image, depth, other_edge_with_id, edge_map, edge_ccs,
- depth_edge_model, depth_feat_model, rgb_model, config, direc="left-down")
- specific_edge_loc = None
- specific_edge_id = []
- vis_edge_id = None
- context_ccs, mask_ccs, broken_mask_ccs, edge_ccs, erode_context_ccs, \
- init_mask_connect, edge_maps, extend_context_ccs, extend_edge_ccs, extend_erode_context_ccs = \
- context_and_holes(input_mesh,
- edge_ccs,
- config,
- specific_edge_id,
- specific_edge_loc,
- depth_feat_model,
- inpaint_iter=0,
- vis_edge_id=vis_edge_id)
- edge_canvas = np.zeros((H, W))
- mask = np.zeros((H, W))
- context = np.zeros((H, W))
- vis_edge_ccs = filter_edge(input_mesh, edge_ccs, config)
- edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - 1
- specific_edge_loc = None
- FG_edge_maps = edge_maps.copy()
- edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - 1
- # for cc_id, cc in enumerate(edge_ccs):
- # for node in cc:
- # edge_canvas[node[0], node[1]] = cc_id
- # f, ((ax0, ax1, ax2)) = plt.subplots(1, 3, sharex=True, sharey=True); ax0.imshow(1./depth); ax1.imshow(image); ax2.imshow(edge_canvas); plt.show()
- input_mesh, info_on_pix, specific_edge_nodes, new_edge_ccs, connect_points_ccs, image = DL_inpaint_edge(input_mesh,
- info_on_pix,
- config,
- image,
- depth,
- context_ccs,
- erode_context_ccs,
- extend_context_ccs,
- extend_erode_context_ccs,
- mask_ccs,
- broken_mask_ccs,
- edge_ccs,
- extend_edge_ccs,
- init_mask_connect,
- edge_maps,
- rgb_model,
- depth_edge_model,
- depth_edge_model_init,
- depth_feat_model,
- specific_edge_id,
- specific_edge_loc,
- inpaint_iter=0)
- specific_edge_id = []
- edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W']))
- connect_points_ccs = [set() for _ in connect_points_ccs]
- context_ccs, mask_ccs, broken_mask_ccs, edge_ccs, erode_context_ccs, init_mask_connect, \
- edge_maps, extend_context_ccs, extend_edge_ccs, extend_erode_context_ccs = \
- context_and_holes(input_mesh, new_edge_ccs, config, specific_edge_id, specific_edge_loc, depth_feat_model, connect_points_ccs, inpaint_iter=1)
- mask_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W']))
- context_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W']))
- erode_context_ccs_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W']))
- edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W']))
- # edge_canvas = np.zeros((input_mesh.graph['H'], input_mesh.graph['W'])) - 1
- # for cc_id, cc in enumerate(edge_ccs):
- # for node in cc:
- # edge_canvas[node[0], node[1]] = cc_id
- specific_edge_id = []
- input_mesh, info_on_pix, specific_edge_nodes, new_edge_ccs, _, image = DL_inpaint_edge(input_mesh,
- info_on_pix,
- config,
- image,
- depth,
- context_ccs,
- erode_context_ccs,
- extend_context_ccs,
- extend_erode_context_ccs,
- mask_ccs,
- broken_mask_ccs,
- edge_ccs,
- extend_edge_ccs,
- init_mask_connect,
- edge_maps,
- rgb_model,
- depth_edge_model,
- depth_edge_model_init,
- depth_feat_model,
- specific_edge_id,
- specific_edge_loc,
- inpaint_iter=1)
- vertex_id = 0
- input_mesh.graph['H'], input_mesh.graph['W'] = input_mesh.graph['noext_H'], input_mesh.graph['noext_W']
- background_canvas = np.zeros((input_mesh.graph['H'],
- input_mesh.graph['W'],
- 3))
- ply_flag = config.get('save_ply')
- if ply_flag is True:
- node_str_list = []
- else:
- node_str_color = []
- node_str_point = []
- out_fmt = lambda x, x_flag: str(x) if x_flag is True else x
- point_time = 0
- hlight_time = 0
- cur_id_time = 0
- node_str_time = 0
- generate_face_time = 0
- point_list = []
- k_00, k_02, k_11, k_12 = \
- input_mesh.graph['cam_param_pix_inv'][0, 0], input_mesh.graph['cam_param_pix_inv'][0, 2], \
- input_mesh.graph['cam_param_pix_inv'][1, 1], input_mesh.graph['cam_param_pix_inv'][1, 2]
- w_offset = input_mesh.graph['woffset']
- h_offset = input_mesh.graph['hoffset']
- for pix_xy, pix_list in info_on_pix.items():
- for pix_idx, pix_info in enumerate(pix_list):
- pix_depth = pix_info['depth'] if pix_info.get('real_depth') is None else pix_info['real_depth']
- str_pt = [out_fmt(x, ply_flag) for x in reproject_3d_int_detail(pix_xy[0], pix_xy[1], pix_depth,
- k_00, k_02, k_11, k_12, w_offset, h_offset)]
- if input_mesh.has_node((pix_xy[0], pix_xy[1], pix_info['depth'])) is False:
- return False  # a node missing from the mesh aborts the whole export
- if pix_info.get('overlap_number') is not None:
- str_color = [out_fmt(x, ply_flag) for x in (pix_info['color']/pix_info['overlap_number']).astype(np.uint8).tolist()]
- else:
- str_color = [out_fmt(x, ply_flag) for x in pix_info['color'].tolist()]
- if pix_info.get('edge_occlusion') is True:
- str_color.append(out_fmt(4, ply_flag))
- else:
- if pix_info.get('inpaint_id') is None:
- str_color.append(out_fmt(1, ply_flag))
- else:
- str_color.append(out_fmt(pix_info.get('inpaint_id') + 1, ply_flag))
- if pix_info.get('modified_border') is True or pix_info.get('ext_pixel') is True:
- if len(str_color) == 4:
- str_color[-1] = out_fmt(5, ply_flag)
- else:
- str_color.append(out_fmt(5, ply_flag))
- pix_info['cur_id'] = vertex_id
- input_mesh.nodes[(pix_xy[0], pix_xy[1], pix_info['depth'])]['cur_id'] = out_fmt(vertex_id, ply_flag)
- vertex_id += 1
- if ply_flag is True:
- node_str_list.append(' '.join(str_pt) + ' ' + ' '.join(str_color) + '\n')
- else:
- node_str_color.append(str_color)
- node_str_point.append(str_pt)
- str_faces = generate_face(input_mesh, info_on_pix, config)
- if config['save_ply'] is True:
- print("Writing mesh file %s ..." % ply_name)
- with open(ply_name, 'w') as ply_fi:
- ply_fi.write('ply\n' + 'format ascii 1.0\n')
- ply_fi.write('comment H ' + str(int(input_mesh.graph['H'])) + '\n')
- ply_fi.write('comment W ' + str(int(input_mesh.graph['W'])) + '\n')
- ply_fi.write('comment hFov ' + str(float(input_mesh.graph['hFov'])) + '\n')
- ply_fi.write('comment vFov ' + str(float(input_mesh.graph['vFov'])) + '\n')
- ply_fi.write('element vertex ' + str(len(node_str_list)) + '\n')
- ply_fi.write('property float x\n' + \
- 'property float y\n' + \
- 'property float z\n' + \
- 'property uchar red\n' + \
- 'property uchar green\n' + \
- 'property uchar blue\n' + \
- 'property uchar alpha\n')
- ply_fi.write('element face ' + str(len(str_faces)) + '\n')
- ply_fi.write('property list uchar int vertex_index\n')
- ply_fi.write('end_header\n')
- ply_fi.writelines(node_str_list)
- ply_fi.writelines(str_faces)
- ply_fi.close()
- return input_mesh
- else:
- H = int(input_mesh.graph['H'])
- W = int(input_mesh.graph['W'])
- hFov = input_mesh.graph['hFov']
- vFov = input_mesh.graph['vFov']
- node_str_color = np.array(node_str_color).astype(np.float32)
- node_str_color[..., :3] = node_str_color[..., :3] / 255.
- node_str_point = np.array(node_str_point)
- str_faces = np.array(str_faces)
-
- return node_str_point, node_str_color, str_faces, H, W, hFov, vFov
-
-def read_ply(mesh_fi):
- ply_fi = open(mesh_fi, 'r')
- Height = None
- Width = None
- hFov = None
- vFov = None
- while True:
- line = ply_fi.readline().split('\n')[0]
- if line.startswith('element vertex'):
- num_vertex = int(line.split(' ')[-1])
- elif line.startswith('element face'):
- num_face = int(line.split(' ')[-1])
- elif line.startswith('comment'):
- if line.split(' ')[1] == 'H':
- Height = int(line.split(' ')[-1].split('\n')[0])
- if line.split(' ')[1] == 'W':
- Width = int(line.split(' ')[-1].split('\n')[0])
- if line.split(' ')[1] == 'hFov':
- hFov = float(line.split(' ')[-1].split('\n')[0])
- if line.split(' ')[1] == 'vFov':
- vFov = float(line.split(' ')[-1].split('\n')[0])
- elif line.startswith('end_header'):
- break
- contents = ply_fi.readlines()
- vertex_infos = contents[:num_vertex]
- face_infos = contents[num_vertex:]
- verts = []
- colors = []
- faces = []
- for v_info in vertex_infos:
- str_info = [float(v) for v in v_info.split('\n')[0].split(' ')]
- if len(str_info) == 6:
- vx, vy, vz, r, g, b = str_info
- hi = 0  # this vertex line carries no flag channel; default it to 0 (assumed neutral value)
- else:
- vx, vy, vz, r, g, b, hi = str_info
- verts.append([vx, vy, vz])
- colors.append([r, g, b, hi])
- verts = np.array(verts)
- colors = np.array(colors)
- colors[..., :3] = colors[..., :3] / 255.  # scale rgb channels to [0, 1]; the flag channel stays as written
-
- for f_info in face_infos:
- _, v1, v2, v3 = [int(f) for f in f_info.split('\n')[0].split(' ')]
- faces.append([v1, v2, v3])
- faces = np.array(faces)
-
-
- return verts, colors, faces, Height, Width, hFov, vFov
-
-
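A minimal, hedged usage sketch for read_ply: it assumes a PLY file produced by the save branch above; 'mesh.ply' is a placeholder path, not a file shipped with this repo.

# Hedged usage sketch for read_ply (placeholder path, shapes only).
verts, colors, faces, H, W, hFov, vFov = read_ply('mesh.ply')
print('vertices:', verts.shape)   # (num_vertex, 3) xyz positions
print('colors:  ', colors.shape)  # (num_vertex, 4) rgb scaled to [0, 1] plus the inpaint/edge flag
print('faces:   ', faces.shape)   # (num_face, 3) vertex indices into verts
print('canvas:  ', H, 'x', W, ' hFov/vFov (rad):', hFov, vFov)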
-class Canvas_view():
- def __init__(self,
- fov,
- verts,
- faces,
- colors,
- canvas_size,
- factor=1,
- bgcolor='gray',
- proj='perspective',
- ):
- self.canvas = scene.SceneCanvas(bgcolor=bgcolor, size=(canvas_size*factor, canvas_size*factor))
- self.view = self.canvas.central_widget.add_view()
- self.view.camera = 'perspective'
- self.view.camera.fov = fov
- self.mesh = visuals.Mesh(shading=None)
- self.mesh.attach(Alpha(1.0))
- self.view.add(self.mesh)
- self.tr = self.view.camera.transform
- self.mesh.set_data(vertices=verts, faces=faces, vertex_colors=colors[:, :3])
- self.translate([0,0,0])
- self.rotate(axis=[1,0,0], angle=180)
- self.view_changed()
-
- def translate(self, trans=[0,0,0]):
- self.tr.translate(trans)
-
- def rotate(self, axis=[1,0,0], angle=0):
- self.tr.rotate(axis=axis, angle=angle)
-
- def view_changed(self):
- self.view.camera.view_changed()
-
- def render(self):
- return self.canvas.render()
-
- def reinit_mesh(self, verts, faces, colors):
- self.mesh.set_data(vertices=verts, faces=faces, vertex_colors=colors[:, :3])
-
- def reinit_camera(self, fov):
- self.view.camera.fov = fov
- self.view.camera.view_changed()
-
-
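A short sketch of how Canvas_view is typically driven, assuming vispy has a working backend and that verts, faces, colors, H, W, hFov and vFov come from read_ply above; the 5-degree yaw is purely illustrative.

# Hedged sketch: render one frame of the mesh from a slightly rotated viewpoint.
import numpy as np

fov_deg = max(hFov, vFov) * 180 / np.pi        # same radian-to-degree conversion as output_3d_photo below
canvas = Canvas_view(fov_deg, verts, faces, colors, canvas_size=max(H, W))
canvas.rotate(axis=[0, 1, 0], angle=5)         # illustrative 5-degree yaw
canvas.view_changed()
frame = canvas.render()                        # RGBA array, canvas_size * factor pixels per side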
-def output_3d_photo(verts, colors, faces, Height, Width, hFov, vFov, tgt_poses, video_traj_types, ref_pose,
- output_dir, ref_image, int_mtx, config, image, videos_poses, video_basename, original_H=None, original_W=None,
- border=None, depth=None, normal_canvas=None, all_canvas=None, mean_loc_depth=None):
-
- cam_mesh = netx.Graph()
- cam_mesh.graph['H'] = Height
- cam_mesh.graph['W'] = Width
- cam_mesh.graph['original_H'] = original_H
- cam_mesh.graph['original_W'] = original_W
- int_mtx_real_x = int_mtx[0] * Width
- int_mtx_real_y = int_mtx[1] * Height
- cam_mesh.graph['hFov'] = 2 * np.arctan((1. / 2.) * ((cam_mesh.graph['original_W']) / int_mtx_real_x[0]))
- cam_mesh.graph['vFov'] = 2 * np.arctan((1. / 2.) * ((cam_mesh.graph['original_H']) / int_mtx_real_y[1]))
- colors = colors[..., :3]
-
- fov_in_rad = max(cam_mesh.graph['vFov'], cam_mesh.graph['hFov'])
- fov = (fov_in_rad * 180 / np.pi)
- print("fov: " + str(fov))
- init_factor = 1
- if config.get('anti_flickering') is True:
- init_factor = 3
- if (cam_mesh.graph['original_H'] is not None) and (cam_mesh.graph['original_W'] is not None):
- canvas_w = cam_mesh.graph['original_W']
- canvas_h = cam_mesh.graph['original_H']
- else:
- canvas_w = cam_mesh.graph['W']
- canvas_h = cam_mesh.graph['H']
- canvas_size = max(canvas_h, canvas_w)
- if normal_canvas is None:
- normal_canvas = Canvas_view(fov,
- verts,
- faces,
- colors,
- canvas_size=canvas_size,
- factor=init_factor,
- bgcolor='gray',
- proj='perspective')
- else:
- normal_canvas.reinit_mesh(verts, faces, colors)
- normal_canvas.reinit_camera(fov)
- img = normal_canvas.render()
- backup_img, backup_all_img, all_img_wo_bound = img.copy(), img.copy() * 0, img.copy() * 0
- img = cv2.resize(img, (int(img.shape[1] / init_factor), int(img.shape[0] / init_factor)), interpolation=cv2.INTER_AREA)
- if border is None:
- border = [0, img.shape[0], 0, img.shape[1]]
- H, W = cam_mesh.graph['H'], cam_mesh.graph['W']
- if (cam_mesh.graph['original_H'] is not None) and (cam_mesh.graph['original_W'] is not None):
- aspect_ratio = cam_mesh.graph['original_H'] / cam_mesh.graph['original_W']
- else:
- aspect_ratio = cam_mesh.graph['H'] / cam_mesh.graph['W']
- if aspect_ratio > 1:
- img_h_len = cam_mesh.graph['H'] if cam_mesh.graph.get('original_H') is None else cam_mesh.graph['original_H']
- img_w_len = img_h_len / aspect_ratio
- anchor = [0,
- img.shape[0],
- int(max(0, int((img.shape[1])//2 - img_w_len//2))),
- int(min(int((img.shape[1])//2 + img_w_len//2), (img.shape[1])-1))]
- elif aspect_ratio <= 1:
- img_w_len = cam_mesh.graph['W'] if cam_mesh.graph.get('original_W') is None else cam_mesh.graph['original_W']
- img_h_len = img_w_len * aspect_ratio
- anchor = [int(max(0, int((img.shape[0])//2 - img_h_len//2))),
- int(min(int((img.shape[0])//2 + img_h_len//2), (img.shape[0])-1)),
- 0,
- img.shape[1]]
- anchor = np.array(anchor)
- plane_width = np.tan(fov_in_rad/2.) * np.abs(mean_loc_depth)
- for video_pose, video_traj_type in zip(videos_poses, video_traj_types):
- stereos = []
- tops = []; buttoms = []; lefts = []; rights = []
- for tp_id, tp in enumerate(video_pose):
- rel_pose = np.linalg.inv(np.dot(tp, np.linalg.inv(ref_pose)))
- axis, angle = transforms3d.axangles.mat2axangle(rel_pose[0:3, 0:3])
- normal_canvas.rotate(axis=axis, angle=(angle*180)/np.pi)
- normal_canvas.translate(rel_pose[:3,3])
- new_mean_loc_depth = mean_loc_depth - float(rel_pose[2, 3])
- if 'dolly' in video_traj_type:
- new_fov = float((np.arctan2(plane_width, np.array([np.abs(new_mean_loc_depth)])) * 180. / np.pi) * 2)
- normal_canvas.reinit_camera(new_fov)
- else:
- normal_canvas.reinit_camera(fov)
- normal_canvas.view_changed()
- img = normal_canvas.render()
- img = cv2.GaussianBlur(img,(int(init_factor//2 * 2 + 1), int(init_factor//2 * 2 + 1)), 0)
- img = cv2.resize(img, (int(img.shape[1] / init_factor), int(img.shape[0] / init_factor)), interpolation=cv2.INTER_AREA)
- img = img[anchor[0]:anchor[1], anchor[2]:anchor[3]]
- img = img[int(border[0]):int(border[1]), int(border[2]):int(border[3])]
-
- if any(np.array(config['crop_border']) > 0.0):
- H_c, W_c, _ = img.shape
- o_t = int(H_c * config['crop_border'][0])
- o_l = int(W_c * config['crop_border'][1])
- o_b = int(H_c * config['crop_border'][2])
- o_r = int(W_c * config['crop_border'][3])
- img = img[o_t:H_c-o_b, o_l:W_c-o_r]
- img = cv2.resize(img, (W_c, H_c), interpolation=cv2.INTER_CUBIC)
-
- """
- img = cv2.resize(img, (int(img.shape[1] / init_factor), int(img.shape[0] / init_factor)), interpolation=cv2.INTER_CUBIC)
- img = img[anchor[0]:anchor[1], anchor[2]:anchor[3]]
- img = img[int(border[0]):int(border[1]), int(border[2]):int(border[3])]
-
- if config['crop_border'] is True:
- top, buttom, left, right = find_largest_rect(img, bg_color=(128, 128, 128))
- tops.append(top); buttoms.append(buttom); lefts.append(left); rights.append(right)
- """
- stereos.append(img[..., :3])
- normal_canvas.translate(-rel_pose[:3,3])
- normal_canvas.rotate(axis=axis, angle=-(angle*180)/np.pi)
- normal_canvas.view_changed()
- """
- if config['crop_border'] is True:
- atop, abuttom = min(max(tops), img.shape[0]//2 - 10), max(min(buttoms), img.shape[0]//2 + 10)
- aleft, aright = min(max(lefts), img.shape[1]//2 - 10), max(min(rights), img.shape[1]//2 + 10)
- atop -= atop % 2; abuttom -= abuttom % 2; aleft -= aleft % 2; aright -= aright % 2
- else:
- atop = 0; abuttom = img.shape[0] - img.shape[0] % 2; aleft = 0; aright = img.shape[1] - img.shape[1] % 2
- """
- atop = 0; abuttom = img.shape[0] - img.shape[0] % 2; aleft = 0; aright = img.shape[1] - img.shape[1] % 2
- crop_stereos = []
- for stereo in stereos:
- crop_stereos.append((stereo[atop:abuttom, aleft:aright, :3] * 1).astype(np.uint8))
- stereos = crop_stereos
- clip = ImageSequenceClip(stereos, fps=config['fps'])
- if isinstance(video_basename, list):
- video_basename = video_basename[0]
- clip.write_videofile(os.path.join(output_dir, video_basename + '_' + video_traj_type + '.mp4'), fps=config['fps'])
-
-
-
- return normal_canvas, all_canvas
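The 'dolly' branch above keeps the visible plane width fixed while the camera slides along z, so the field of view has to widen as the camera approaches the scene. A small numeric sketch of that relationship (all values are illustrative, not taken from this repo):

# Hedged numeric sketch of the dolly-zoom FOV update used in output_3d_photo.
import numpy as np

fov_in_rad = np.deg2rad(60.0)                  # illustrative base field of view
mean_loc_depth = 2.0                           # illustrative mean scene depth (arbitrary units)
plane_width = np.tan(fov_in_rad / 2.) * abs(mean_loc_depth)

for dz in (0.0, 0.2, 0.4):                     # camera moves forward by dz along z
    new_fov = 2 * np.degrees(np.arctan2(plane_width, abs(mean_loc_depth - dz)))
    print(f"dz={dz:.1f} -> fov={new_fov:.1f} deg")  # 60.0, 65.4, 71.6: wider as the camera gets closer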
diff --git a/spaces/KarmKarma/rvc-models-genshinimpact/infer_pack/attentions.py b/spaces/KarmKarma/rvc-models-genshinimpact/infer_pack/attentions.py
deleted file mode 100644
index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000
--- a/spaces/KarmKarma/rvc-models-genshinimpact/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from infer_pack import commons
-from infer_pack import modules
-from infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along the column dimension
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
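The _relative_position_to_absolute_position helper above uses the standard pad-and-reshape trick from relative-position attention; the snippet below is a hedged, standalone shape check that mirrors the same sequence of operations on random data rather than calling the method itself.

# Hedged shape check for the relative-to-absolute score reshuffling used in MultiHeadAttention.
import torch
import torch.nn.functional as F

b, h, l = 2, 4, 5
rel = torch.randn(b, h, l, 2 * l - 1)          # one logit per (query, relative offset) pair

x = F.pad(rel, (0, 1))                         # append one zero column
x = x.view(b, h, l * 2 * l)                    # flatten the last two axes
x = F.pad(x, (0, l - 1))                       # pad so the next view lines rows up
absolute = x.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
assert absolute.shape == (b, h, l, l)          # one score per (query, key) pair
print(absolute.shape)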
diff --git a/spaces/KazeDevID/RVC-Model/infer_pack/attentions.py b/spaces/KazeDevID/RVC-Model/infer_pack/attentions.py
deleted file mode 100644
index 77cb63ffccf3e33badf22d50862a64ba517b487f..0000000000000000000000000000000000000000
--- a/spaces/KazeDevID/RVC-Model/infer_pack/attentions.py
+++ /dev/null
@@ -1,417 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from infer_pack import commons
-from infer_pack import modules
-from infer_pack.modules import LayerNorm
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- window_size=10,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
-
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- window_size=window_size,
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(
- self,
- hidden_channels,
- filter_channels,
- n_heads,
- n_layers,
- kernel_size=1,
- p_dropout=0.0,
- proximal_bias=False,
- proximal_init=True,
- **kwargs
- ):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(
- MultiHeadAttention(
- hidden_channels,
- hidden_channels,
- n_heads,
- p_dropout=p_dropout,
- proximal_bias=proximal_bias,
- proximal_init=proximal_init,
- )
- )
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(
- MultiHeadAttention(
- hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout
- )
- )
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(
- FFN(
- hidden_channels,
- hidden_channels,
- filter_channels,
- kernel_size,
- p_dropout=p_dropout,
- causal=True,
- )
- )
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(
- device=x.device, dtype=x.dtype
- )
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(
- self,
- channels,
- out_channels,
- n_heads,
- p_dropout=0.0,
- window_size=None,
- heads_share=True,
- block_length=None,
- proximal_bias=False,
- proximal_init=False,
- ):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
- self.emb_rel_v = nn.Parameter(
- torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels)
- * rel_stddev
- )
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert (
- t_s == t_t
- ), "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(
- query / math.sqrt(self.k_channels), key_relative_embeddings
- )
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(
- device=scores.device, dtype=scores.dtype
- )
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert (
- t_s == t_t
- ), "Local attention is only available for self-attention."
- block_mask = (
- torch.ones_like(scores)
- .triu(-self.block_length)
- .tril(self.block_length)
- )
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(
- self.emb_rel_v, t_s
- )
- output = output + self._matmul_with_relative_values(
- relative_weights, value_relative_embeddings
- )
- output = (
- output.transpose(2, 3).contiguous().view(b, d, t_t)
- ) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]),
- )
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[
- :, slice_start_position:slice_end_position
- ]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
-
- # Concat extra elements so to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(
- x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])
- )
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[
- :, :, :length, length - 1 :
- ]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
- batch, heads, length, _ = x.size()
- # pad along the column dimension
- x = F.pad(
- x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])
- )
- x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- filter_channels,
- kernel_size,
- p_dropout=0.0,
- activation=None,
- causal=False,
- ):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
diff --git a/spaces/KenjieDec/RemBG/rembg/commands/s_command.py b/spaces/KenjieDec/RemBG/rembg/commands/s_command.py
deleted file mode 100644
index 4fba1ce3e71e0e55fd2d9d81a4b35b5637c1be37..0000000000000000000000000000000000000000
--- a/spaces/KenjieDec/RemBG/rembg/commands/s_command.py
+++ /dev/null
@@ -1,285 +0,0 @@
-import json
-import os
-import webbrowser
-from typing import Optional, Tuple, cast
-
-import aiohttp
-import click
-import gradio as gr
-import uvicorn
-from asyncer import asyncify
-from fastapi import Depends, FastAPI, File, Form, Query
-from fastapi.middleware.cors import CORSMiddleware
-from starlette.responses import Response
-
-from .._version import get_versions
-from ..bg import remove
-from ..session_factory import new_session
-from ..sessions import sessions_names
-from ..sessions.base import BaseSession
-
-
-@click.command(
- name="s",
- help="for a http server",
-)
-@click.option(
- "-p",
- "--port",
- default=5000,
- type=int,
- show_default=True,
- help="port",
-)
-@click.option(
- "-l",
- "--log_level",
- default="info",
- type=str,
- show_default=True,
- help="log level",
-)
-@click.option(
- "-t",
- "--threads",
- default=None,
- type=int,
- show_default=True,
- help="number of worker threads",
-)
-def s_command(port: int, log_level: str, threads: int) -> None:
- sessions: dict[str, BaseSession] = {}
- tags_metadata = [
- {
- "name": "Background Removal",
- "description": "Endpoints that perform background removal with different image sources.",
- "externalDocs": {
- "description": "GitHub Source",
- "url": "https://github.com/danielgatis/rembg",
- },
- },
- ]
- app = FastAPI(
- title="Rembg",
- description="Rembg is a tool to remove image backgrounds. That is it.",
- version=get_versions()["version"],
- contact={
- "name": "Daniel Gatis",
- "url": "https://github.com/danielgatis",
- "email": "danielgatis@gmail.com",
- },
- license_info={
- "name": "MIT License",
- "url": "https://github.com/danielgatis/rembg/blob/main/LICENSE.txt",
- },
- openapi_tags=tags_metadata,
- docs_url="/api",
- )
-
- app.add_middleware(
- CORSMiddleware,
- allow_credentials=True,
- allow_origins=["*"],
- allow_methods=["*"],
- allow_headers=["*"],
- )
-
- class CommonQueryParams:
- def __init__(
- self,
- model: str = Query(
- description="Model to use when processing image",
- regex=r"(" + "|".join(sessions_names) + ")",
- default="u2net",
- ),
- a: bool = Query(default=False, description="Enable Alpha Matting"),
- af: int = Query(
- default=240,
- ge=0,
- le=255,
- description="Alpha Matting (Foreground Threshold)",
- ),
- ab: int = Query(
- default=10,
- ge=0,
- le=255,
- description="Alpha Matting (Background Threshold)",
- ),
- ae: int = Query(
- default=10, ge=0, description="Alpha Matting (Erode Structure Size)"
- ),
- om: bool = Query(default=False, description="Only Mask"),
- ppm: bool = Query(default=False, description="Post Process Mask"),
- bgc: Optional[str] = Query(default=None, description="Background Color"),
- extras: Optional[str] = Query(
- default=None, description="Extra parameters as JSON"
- ),
- ):
- self.model = model
- self.a = a
- self.af = af
- self.ab = ab
- self.ae = ae
- self.om = om
- self.ppm = ppm
- self.extras = extras
- self.bgc = (
- cast(Tuple[int, int, int, int], tuple(map(int, bgc.split(","))))
- if bgc
- else None
- )
-
- class CommonQueryPostParams:
- def __init__(
- self,
- model: str = Form(
- description="Model to use when processing image",
- regex=r"(" + "|".join(sessions_names) + ")",
- default="u2net",
- ),
- a: bool = Form(default=False, description="Enable Alpha Matting"),
- af: int = Form(
- default=240,
- ge=0,
- le=255,
- description="Alpha Matting (Foreground Threshold)",
- ),
- ab: int = Form(
- default=10,
- ge=0,
- le=255,
- description="Alpha Matting (Background Threshold)",
- ),
- ae: int = Form(
- default=10, ge=0, description="Alpha Matting (Erode Structure Size)"
- ),
- om: bool = Form(default=False, description="Only Mask"),
- ppm: bool = Form(default=False, description="Post Process Mask"),
- bgc: Optional[str] = Query(default=None, description="Background Color"),
- extras: Optional[str] = Query(
- default=None, description="Extra parameters as JSON"
- ),
- ):
- self.model = model
- self.a = a
- self.af = af
- self.ab = ab
- self.ae = ae
- self.om = om
- self.ppm = ppm
- self.extras = extras
- self.bgc = (
- cast(Tuple[int, int, int, int], tuple(map(int, bgc.split(","))))
- if bgc
- else None
- )
-
- def im_without_bg(content: bytes, commons: CommonQueryParams) -> Response:
- kwargs = {}
-
- if commons.extras:
- try:
- kwargs.update(json.loads(commons.extras))
- except Exception:
- pass
-
- return Response(
- remove(
- content,
- session=sessions.setdefault(commons.model, new_session(commons.model)),
- alpha_matting=commons.a,
- alpha_matting_foreground_threshold=commons.af,
- alpha_matting_background_threshold=commons.ab,
- alpha_matting_erode_size=commons.ae,
- only_mask=commons.om,
- post_process_mask=commons.ppm,
- bgcolor=commons.bgc,
- **kwargs,
- ),
- media_type="image/png",
- )
-
- @app.on_event("startup")
- def startup():
- try:
- webbrowser.open(f"http://localhost:{port}")
- except Exception:
- pass
-
- if threads is not None:
- from anyio import CapacityLimiter
- from anyio.lowlevel import RunVar
-
- RunVar("_default_thread_limiter").set(CapacityLimiter(threads))
-
- @app.get(
- path="/api/remove",
- tags=["Background Removal"],
- summary="Remove from URL",
- description="Removes the background from an image obtained by retrieving an URL.",
- )
- async def get_index(
- url: str = Query(
- default=..., description="URL of the image that has to be processed."
- ),
- commons: CommonQueryParams = Depends(),
- ):
- async with aiohttp.ClientSession() as session:
- async with session.get(url) as response:
- file = await response.read()
- return await asyncify(im_without_bg)(file, commons)
-
- @app.post(
- path="/api/remove",
- tags=["Background Removal"],
- summary="Remove from Stream",
- description="Removes the background from an image sent within the request itself.",
- )
- async def post_index(
- file: bytes = File(
- default=...,
- description="Image file (byte stream) that has to be processed.",
- ),
- commons: CommonQueryPostParams = Depends(),
- ):
- return await asyncify(im_without_bg)(file, commons) # type: ignore
-
- def gr_app(app):
- def inference(input_path, model):
- output_path = "output.png"
- with open(input_path, "rb") as i:
- with open(output_path, "wb") as o:
- input = i.read()
- output = remove(input, session=new_session(model))
- o.write(output)
- return os.path.join(output_path)
-
- interface = gr.Interface(
- inference,
- [
- gr.components.Image(type="filepath", label="Input"),
- gr.components.Dropdown(
- [
- "u2net",
- "u2netp",
- "u2net_human_seg",
- "u2net_cloth_seg",
- "silueta",
- "isnet-general-use",
- "isnet-anime",
- ],
- value="u2net",
- label="Models",
- ),
- ],
- gr.components.Image(type="filepath", label="Output"),
- )
-
- interface.queue(concurrency_count=3)
- app = gr.mount_gradio_app(app, interface, path="/")
- return app
-
- print(f"To access the API documentation, go to http://localhost:{port}/api")
- print(f"To access the UI, go to http://localhost:{port}")
-
- uvicorn.run(gr_app(app), host="0.0.0.0", port=port, log_level=log_level)
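A hedged client-side sketch against the two /api/remove routes registered above. It assumes the server is running locally on the default port 5000 and that the requests package is available; the image URL and file names are placeholders.

# Hedged usage sketch for the GET and POST variants of /api/remove.
import requests

base = "http://localhost:5000/api/remove"

# GET: the server fetches the image itself ('https://example.com/cat.jpg' is a placeholder URL).
png = requests.get(base, params={"url": "https://example.com/cat.jpg", "model": "u2net"}).content
with open("cut_from_url.png", "wb") as out:
    out.write(png)

# POST: upload the bytes directly ('input.jpg' is a placeholder local file).
with open("input.jpg", "rb") as f:
    png = requests.post(base, data={"model": "isnet-general-use"}, files={"file": f}).content
with open("cut_from_file.png", "wb") as out:
    out.write(png)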
diff --git a/spaces/KenjieDec/RemBG/rembg/session_cloth.py b/spaces/KenjieDec/RemBG/rembg/session_cloth.py
deleted file mode 100644
index 11bcef74378be4d64058772c29ac45240f60a85b..0000000000000000000000000000000000000000
--- a/spaces/KenjieDec/RemBG/rembg/session_cloth.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from typing import List
-
-import numpy as np
-from PIL import Image
-from PIL.Image import Image as PILImage
-from scipy.special import log_softmax
-
-from .session_base import BaseSession
-
-pallete1 = [
- 0,
- 0,
- 0,
- 255,
- 255,
- 255,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
-]
-
-pallete2 = [
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 255,
- 255,
- 255,
- 0,
- 0,
- 0,
-]
-
-pallete3 = [
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 255,
- 255,
- 255,
-]
-
-
-class ClothSession(BaseSession):
- def predict(self, img: PILImage) -> List[PILImage]:
- ort_outs = self.inner_session.run(
- None, self.normalize(img, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (768, 768))
- )
-
- pred = ort_outs
- pred = log_softmax(pred[0], 1)
- pred = np.argmax(pred, axis=1, keepdims=True)
- pred = np.squeeze(pred, 0)
- pred = np.squeeze(pred, 0)
-
- mask = Image.fromarray(pred.astype("uint8"), mode="L")
- mask = mask.resize(img.size, Image.LANCZOS)
-
- masks = []
-
- mask1 = mask.copy()
- mask1.putpalette(pallete1)
- mask1 = mask1.convert("RGB").convert("L")
- masks.append(mask1)
-
- mask2 = mask.copy()
- mask2.putpalette(pallete2)
- mask2 = mask2.convert("RGB").convert("L")
- masks.append(mask2)
-
- mask3 = mask.copy()
- mask3.putpalette(pallete3)
- mask3 = mask3.convert("RGB").convert("L")
- masks.append(mask3)
-
- return masks
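The three pallete* lookup tables above simply recolour the 4-class argmax map (index 0 is background, the other indices are the garment classes) so that one class becomes white and everything else black; converting back to grayscale then yields a binary mask per class. A toy, self-contained demonstration of the same trick:

# Hedged toy demonstration of the putpalette-based class-mask extraction used in ClothSession.predict.
import numpy as np
from PIL import Image

labels = np.array([[0, 1, 2],
                   [3, 1, 0]], dtype=np.uint8)   # toy 4-class prediction map

mask = Image.fromarray(labels, mode="L")
for class_id, palette in enumerate((pallete1, pallete2, pallete3), start=1):
    m = mask.copy()
    m.putpalette(palette)                        # class_id -> white, every other index -> black
    m = m.convert("RGB").convert("L")
    print(f"class {class_id} mask:\n{np.array(m)}")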
diff --git a/spaces/Kevin676/ChatGPT-with-Smooth-Voice-1.0/app.py b/spaces/Kevin676/ChatGPT-with-Smooth-Voice-1.0/app.py
deleted file mode 100644
index 5836efeec57bd61e4c07db59711d18d2ca380d9d..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/ChatGPT-with-Smooth-Voice-1.0/app.py
+++ /dev/null
@@ -1,123 +0,0 @@
-from TTS.api import TTS
-tts = TTS(model_name="tts_models/multilingual/multi-dataset/your_tts", progress_bar=False, gpu=True)
-import whisper
-model = whisper.load_model("small")
-import os
-os.system('pip install voicefixer --upgrade')
-from voicefixer import VoiceFixer
-voicefixer = VoiceFixer()
-import gradio as gr
-import openai
-import torch
-import torchaudio
-from speechbrain.pretrained import SpectralMaskEnhancement
-
-enhance_model = SpectralMaskEnhancement.from_hparams(
-source="speechbrain/metricgan-plus-voicebank",
-savedir="pretrained_models/metricgan-plus-voicebank",
-run_opts={"device":"cuda"},
-)
-
-mes1 = [
- {"role": "system", "content": "You are a TOEFL examiner. Help me improve my oral English and give me feedback."}
-]
-
-mes2 = [
- {"role": "system", "content": "You are a mental health therapist. Your name is Tina."}
-]
-
-mes3 = [
- {"role": "system", "content": "You are my personal assistant. Your name is Alice."}
-]
-
-res = []
-
-def transcribe(apikey, upload, audio, choice1):
-
- openai.api_key = apikey
-
- # load audio and pad/trim it to fit 30 seconds
- audio = whisper.load_audio(audio)
- audio = whisper.pad_or_trim(audio)
-
- # make log-Mel spectrogram and move to the same device as the model
- mel = whisper.log_mel_spectrogram(audio).to(model.device)
-
- # detect the spoken language
- _, probs = model.detect_language(mel)
- print(f"Detected language: {max(probs, key=probs.get)}")
-
- # decode the audio
- options = whisper.DecodingOptions()
- result = whisper.decode(model, mel, options)
- res.append(result.text)
-
- if choice1 == "TOEFL":
- messages = mes1
- elif choice1 == "Therapist":
- messages = mes2
- elif choice1 == "Alice":
- messages = mes3
-
- # chatgpt
- n = len(res)
- content = res[n-1]
- messages.append({"role": "user", "content": content})
-
- completion = openai.ChatCompletion.create(
- model = "gpt-3.5-turbo",
- messages = messages
- )
-
- chat_response = completion.choices[0].message.content
-
- messages.append({"role": "assistant", "content": chat_response})
-
- tts.tts_to_file(chat_response, speaker_wav = upload, language="en", file_path="output.wav")
-
- voicefixer.restore(input="output.wav", # input wav file path
- output="audio1.wav", # output wav file path
- cuda=True, # whether to use gpu acceleration
- mode = 0) # You can try out mode 0, 1, or 2 to find out the best result
-
- noisy = enhance_model.load_audio(
- "audio1.wav"
- ).unsqueeze(0)
-
- enhanced = enhance_model.enhance_batch(noisy, lengths=torch.tensor([1.]))
- torchaudio.save("enhanced.wav", enhanced.cpu(), 16000)
-
- return [result.text, chat_response, "enhanced.wav"]
-
-c1=gr.Interface(
- fn=transcribe,
- inputs=[
- gr.Textbox(lines=1, label = "Please enter your OpenAI API key"),
- gr.Audio(source="upload", label = "Please upload a voice you like (wav file)", type="filepath"),
- gr.Audio(source="microphone", label = "Chat with your personal AI!", type="filepath"),
- gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"),
- ],
- outputs=[
- gr.Textbox(label="Speech to Text"), gr.Textbox(label="ChatGPT Output"), gr.Audio(label="Audio with Custom Voice"),
- ],
- #theme="huggingface",
- description = "🤖 - Let AI with a human touch benefit everyone! AI for good, a brighter civilization! TalktoAI - Enable the future!",
- )
-
-c2=gr.Interface(
- fn=transcribe,
- inputs=[
- gr.Textbox(lines=1, label = "Please enter your OpenAI API key"),
- gr.Audio(source="microphone", label = "Please record a voice you like, and avoid background noise as much as possible", type="filepath"),
- gr.Audio(source="microphone", label = "Chat with your personal AI!", type="filepath"),
- gr.Radio(["TOEFL", "Therapist", "Alice"], label="TOEFL Examiner, Therapist Tina, or Assistant Alice?"),
- ],
- outputs=[
- gr.Textbox(label="Speech to Text"), gr.Textbox(label="ChatGPT Output"), gr.Audio(label="Audio with Custom Voice"),
- ],
- #theme="huggingface",
- description = "🤖 - Let AI with a human touch benefit everyone! AI for good, a brighter civilization! TalktoAI - Enable the future!",
- )
-
- demo = gr.TabbedInterface([c1, c2], ["Upload a wav file", "Record with microphone"], title = '🥳💬💕 - TalktoAI: chat about anything, anytime, anywhere!')
-demo.launch()
\ No newline at end of file
diff --git a/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/distribution.py b/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/distribution.py
deleted file mode 100644
index d3119a5ba1e77bc25a92d2664f83d366f12399c0..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/Real-Time-Voice-Cloning/vocoder/distribution.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-
-def log_sum_exp(x):
- """ numerically stable log_sum_exp implementation that prevents overflow """
- # TF ordering
- axis = len(x.size()) - 1
- m, _ = torch.max(x, dim=axis)
- m2, _ = torch.max(x, dim=axis, keepdim=True)
- return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
-
-
-# It is adapted from https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py
-def discretized_mix_logistic_loss(y_hat, y, num_classes=65536,
- log_scale_min=None, reduce=True):
- if log_scale_min is None:
- log_scale_min = float(np.log(1e-14))
- y_hat = y_hat.permute(0,2,1)
- assert y_hat.dim() == 3
- assert y_hat.size(1) % 3 == 0
- nr_mix = y_hat.size(1) // 3
-
- # (B x T x C)
- y_hat = y_hat.transpose(1, 2)
-
- # unpack parameters. (B, T, num_mixtures) x 3
- logit_probs = y_hat[:, :, :nr_mix]
- means = y_hat[:, :, nr_mix:2 * nr_mix]
- log_scales = torch.clamp(y_hat[:, :, 2 * nr_mix:3 * nr_mix], min=log_scale_min)
-
- # B x T x 1 -> B x T x num_mixtures
- y = y.expand_as(means)
-
- centered_y = y - means
- inv_stdv = torch.exp(-log_scales)
- plus_in = inv_stdv * (centered_y + 1. / (num_classes - 1))
- cdf_plus = torch.sigmoid(plus_in)
- min_in = inv_stdv * (centered_y - 1. / (num_classes - 1))
- cdf_min = torch.sigmoid(min_in)
-
- # log probability for edge case of 0 (before scaling)
- # equivalent: torch.log(F.sigmoid(plus_in))
- log_cdf_plus = plus_in - F.softplus(plus_in)
-
- # log probability for edge case of 255 (before scaling)
- # equivalent: (1 - F.sigmoid(min_in)).log()
- log_one_minus_cdf_min = -F.softplus(min_in)
-
- # probability for all other cases
- cdf_delta = cdf_plus - cdf_min
-
- mid_in = inv_stdv * centered_y
- # log probability in the center of the bin, to be used in extreme cases
- # (not actually used in our code)
- log_pdf_mid = mid_in - log_scales - 2. * F.softplus(mid_in)
-
- # tf equivalent
- """
- log_probs = tf.where(x < -0.999, log_cdf_plus,
- tf.where(x > 0.999, log_one_minus_cdf_min,
- tf.where(cdf_delta > 1e-5,
- tf.log(tf.maximum(cdf_delta, 1e-12)),
- log_pdf_mid - np.log(127.5))))
- """
- # TODO: cdf_delta <= 1e-5 actually can happen. How can we choose the value
- # for num_classes=65536 case? 1e-7? not sure..
- inner_inner_cond = (cdf_delta > 1e-5).float()
-
- inner_inner_out = inner_inner_cond * \
- torch.log(torch.clamp(cdf_delta, min=1e-12)) + \
- (1. - inner_inner_cond) * (log_pdf_mid - np.log((num_classes - 1) / 2))
- inner_cond = (y > 0.999).float()
- inner_out = inner_cond * log_one_minus_cdf_min + (1. - inner_cond) * inner_inner_out
- cond = (y < -0.999).float()
- log_probs = cond * log_cdf_plus + (1. - cond) * inner_out
-
- log_probs = log_probs + F.log_softmax(logit_probs, -1)
-
- if reduce:
- return -torch.mean(log_sum_exp(log_probs))
- else:
- return -log_sum_exp(log_probs).unsqueeze(-1)
-
-
-def sample_from_discretized_mix_logistic(y, log_scale_min=None):
- """
- Sample from discretized mixture of logistic distributions
- Args:
- y (Tensor): B x C x T
- log_scale_min (float): Log scale minimum value
- Returns:
- Tensor: sample in range of [-1, 1].
- """
- if log_scale_min is None:
- log_scale_min = float(np.log(1e-14))
- assert y.size(1) % 3 == 0
- nr_mix = y.size(1) // 3
-
- # B x T x C
- y = y.transpose(1, 2)
- logit_probs = y[:, :, :nr_mix]
-
- # sample mixture indicator from softmax
- temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)
- temp = logit_probs.data - torch.log(- torch.log(temp))
- _, argmax = temp.max(dim=-1)
-
- # (B, T) -> (B, T, nr_mix)
- one_hot = to_one_hot(argmax, nr_mix)
- # select logistic parameters
- means = torch.sum(y[:, :, nr_mix:2 * nr_mix] * one_hot, dim=-1)
- log_scales = torch.clamp(torch.sum(
- y[:, :, 2 * nr_mix:3 * nr_mix] * one_hot, dim=-1), min=log_scale_min)
- # sample from logistic & clip to interval
- # we don't actually round to the nearest 8bit value when sampling
- u = means.data.new(means.size()).uniform_(1e-5, 1.0 - 1e-5)
- x = means + torch.exp(log_scales) * (torch.log(u) - torch.log(1. - u))
-
- x = torch.clamp(torch.clamp(x, min=-1.), max=1.)
-
- return x
-
-
-def to_one_hot(tensor, n, fill_with=1.):
- # we perform one-hot encoding with respect to the last axis
- one_hot = torch.FloatTensor(tensor.size() + (n,)).zero_()
- if tensor.is_cuda:
- one_hot = one_hot.cuda()
- one_hot.scatter_(len(tensor.size()), tensor.unsqueeze(-1), fill_with)
- return one_hot
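A hedged smoke test for the sampler above: random parameters stand in for a trained vocoder output, and the only claim checked is that a (B, 3*nr_mix, T) tensor yields one clipped sample per timestep.

# Hedged shape-level check for sample_from_discretized_mix_logistic (random inputs, CPU only).
import torch

B, T, nr_mix = 2, 100, 10
y = torch.randn(B, 3 * nr_mix, T)          # channels hold [logit_probs | means | log_scales]
x = sample_from_discretized_mix_logistic(y)
print(x.shape)                              # torch.Size([2, 100])
print(float(x.min()), float(x.max()))       # samples are clipped to [-1, 1]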
diff --git a/spaces/KyanChen/FunSR/datasets/inr_sr_wrappers.py b/spaces/KyanChen/FunSR/datasets/inr_sr_wrappers.py
deleted file mode 100644
index 9cd3be7d2f1255eb0cd1788883414550c0ce4298..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/FunSR/datasets/inr_sr_wrappers.py
+++ /dev/null
@@ -1,177 +0,0 @@
-import functools
-import os
-import random
-import math
-from PIL import Image
-
-import numpy as np
-import torch
-from einops import rearrange
-from torch.utils.data import Dataset
-from torchvision import transforms
-
-from datasets import register
-from utils import to_pixel_samples, to_coordinates
-
-import torchvision.transforms.functional as TF
-import random
-from typing import Sequence
-
-
-class MyRotateTransform:
- def __init__(self, angles: Sequence[int], p=0.5):
- self.angles = angles
- self.p = p
-
- def __call__(self, x):
- if torch.rand(1) < self.p:
- return x
- angle = random.choice(self.angles)
- return TF.rotate(x, angle)
-
-@register('inr_fixed_scale_sr_warp')
-class INRFixedScaleSRWarp(Dataset):
- def __init__(self,
- dataset, scale_ratio, patch_size=48,
- augment=False, sample_q=None,
- val_mode=False, test_mode=False,
- encode_scale_ratio=False,
- return_cell=False, # for liff
- ):
- super(INRFixedScaleSRWarp, self).__init__()
- self.dataset = dataset
- self.scale_ratio = scale_ratio
- self.patch_size = patch_size
- self.hr_size = int(patch_size * scale_ratio)
- self.augment = augment
- self.sample_q = sample_q
- self.test_mode = test_mode
- self.val_mode = val_mode
- self.encode_scale_ratio = encode_scale_ratio
- self.return_cell = return_cell
-
- def __len__(self):
- return len(self.dataset)
-
- def __getitem__(self, idx):
- # import pdb
- # pdb.set_trace()
- img_hr, file_name = self.dataset[idx]
- class_name = os.path.basename(os.path.dirname(file_name))
- file_name = os.path.basename(file_name).split('.')[0]
- # img_hr: 3xHxW
- h, w = img_hr.shape[-2:]
- # if h < 256 or w < 256:
- # img_hr = transforms.Resize(256, Image.BICUBIC)(img_hr)
-
- if self.test_mode or self.val_mode:
- img_hr = transforms.CenterCrop(self.hr_size)(img_hr)
- else:
- img_hr = transforms.RandomCrop(self.hr_size)(img_hr)
- if self.augment:
- img_hr = transforms.RandomHorizontalFlip(p=0.5)(img_hr)
- img_hr = transforms.RandomVerticalFlip(p=0.5)(img_hr)
- img_hr = MyRotateTransform([90, 180, 270], p=0.5)(img_hr)
-
- img_lr = transforms.Resize(self.patch_size, Image.BICUBIC)(img_hr)
-
- hr_coord = to_coordinates(size=img_hr.shape[-2:], return_map=False)
- hr_rgb = rearrange(img_hr, 'C H W -> (H W) C')
-
- if self.sample_q is not None and not self.test_mode:
- sample_lst = np.random.choice(
- len(hr_coord), self.sample_q, replace=False)
- hr_coord = hr_coord[sample_lst]
- hr_rgb = hr_rgb[sample_lst]
- return_dict = {
- 'inp': img_lr,
- 'coord': hr_coord,
- 'gt': hr_rgb,
- 'class_name': class_name,
- 'filename': file_name
- }
-
- if self.encode_scale_ratio:
- scale_ratio = torch.ones_like(hr_coord) * self.patch_size / self.hr_size
- return_dict['scale_ratio'] = scale_ratio
-
- if self.return_cell:
- cell = torch.ones_like(hr_coord)
- cell[:, 0] *= 2 / img_hr.shape[-2]
- cell[:, 1] *= 2 / img_hr.shape[-1]
- return_dict['cell'] = cell
-
- return return_dict
-
-
-@register('inr_range_scale_sr_warp')
-class INRRangeScaleSRWarp(Dataset):
- def __init__(self,
- dataset, max_scale_ratio, patch_size=48,
- augment=False, sample_q=None,
- val_mode=False, test_mode=False,
- encode_scale_ratio=False,
- return_cell=False, # for liff
- ):
- super(INRRangeScaleSRWarp, self).__init__()
- self.dataset = dataset
- self.max_scale_ratio = max_scale_ratio
- self.patch_size = patch_size
- assert max_scale_ratio <= 8
- self.augment = augment
- self.sample_q = sample_q
- self.test_mode = test_mode
- self.val_mode = val_mode
- self.encode_scale_ratio = encode_scale_ratio
- self.return_cell = return_cell
-
- def __len__(self):
- return len(self.dataset)
-
- def __getitem__(self, idx):
- img_hr, file_name = self.dataset[idx]
- class_name = os.path.basename(os.path.dirname(file_name))
- h, w = img_hr.shape[-2:]
- # if h < 256 or w < 256:
- # img_hr = transforms.Resize(256, Image.BICUBIC)(img_hr)
-
- hr_size = self.patch_size + self.patch_size * torch.rand([]) * (self.max_scale_ratio - 1)
- hr_size = int(hr_size)
-
- if self.test_mode or self.val_mode:
- hr_size = int(self.patch_size * self.max_scale_ratio)
- img_hr = transforms.CenterCrop(hr_size)(img_hr)
- else:
- img_hr = transforms.RandomCrop(hr_size)(img_hr)
- if self.augment:
- img_hr = transforms.RandomHorizontalFlip(p=0.5)(img_hr)
- img_hr = transforms.RandomVerticalFlip(p=0.5)(img_hr)
- img_hr = MyRotateTransform([90, 180, 270], p=0.5)(img_hr)
-
- img_lr = transforms.Resize(self.patch_size, Image.BICUBIC)(img_hr)
-
- hr_coord = to_coordinates(size=img_hr.shape[-2:], return_map=False)
- hr_rgb = rearrange(img_hr, 'C H W -> (H W) C')
-
- if self.sample_q is not None and not self.test_mode:
- sample_lst = np.random.choice(
- len(hr_coord), self.sample_q, replace=False)
- hr_coord = hr_coord[sample_lst]
- hr_rgb = hr_rgb[sample_lst]
- return_dict = {
- 'inp': img_lr,
- 'coord': hr_coord,
- 'gt': hr_rgb,
- 'class_name': class_name
- }
- if self.encode_scale_ratio:
- scale_ratio = torch.ones_like(hr_coord) * self.patch_size / hr_size
- return_dict['scale_ratio'] = scale_ratio
-
- if self.return_cell:
- cell = torch.ones_like(hr_coord)
- cell[:, 0] *= 2 / img_hr.shape[-2]
- cell[:, 1] *= 2 / img_hr.shape[-1]
- return_dict['cell'] = cell
-
- return return_dict
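Both wrappers above rely on `to_coordinates` from `utils`, which is not part of this diff. A minimal sketch of what such a LIIF-style helper typically returns, paired with the `rearrange` call used above; the normalization convention is an assumption, not the deleted implementation:

```python
import torch
from einops import rearrange

def make_coordinates(size, flatten=True):
    # Normalized pixel-center coordinates in [-1, 1], one (y, x) pair per pixel.
    h, w = size
    ys = (torch.arange(h) + 0.5) / h * 2 - 1
    xs = (torch.arange(w) + 0.5) / w * 2 - 1
    grid = torch.stack(torch.meshgrid(ys, xs, indexing='ij'), dim=-1)  # H x W x 2
    return grid.view(-1, 2) if flatten else grid

img_hr = torch.rand(3, 8, 8)                 # C x H x W
coord = make_coordinates(img_hr.shape[-2:])  # (H*W) x 2 query coordinates
rgb = rearrange(img_hr, 'C H W -> (H W) C')  # (H*W) x 3 ground-truth pixels
print(coord.shape, rgb.shape)
```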
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/losses/ghm_loss.py b/spaces/KyanChen/RSPrompter/mmdet/models/losses/ghm_loss.py
deleted file mode 100644
index a874c0038cc4a77769705a3a06a95a56d3e8dd2d..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/losses/ghm_loss.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from mmdet.registry import MODELS
-from .utils import weight_reduce_loss
-
-
-def _expand_onehot_labels(labels, label_weights, label_channels):
- bin_labels = labels.new_full((labels.size(0), label_channels), 0)
- inds = torch.nonzero(
- (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
- if inds.numel() > 0:
- bin_labels[inds, labels[inds]] = 1
- bin_label_weights = label_weights.view(-1, 1).expand(
- label_weights.size(0), label_channels)
- return bin_labels, bin_label_weights
-
-
-# TODO: code refactoring to make it consistent with other losses
-@MODELS.register_module()
-class GHMC(nn.Module):
- """GHM Classification Loss.
-
- Details of the theorem can be viewed in the paper
-    `Gradient Harmonized Single-stage Detector
-    <https://arxiv.org/abs/1811.05181>`_.
-
- Args:
- bins (int): Number of the unit regions for distribution calculation.
- momentum (float): The parameter for moving average.
- use_sigmoid (bool): Can only be true for BCE based loss now.
- loss_weight (float): The weight of the total GHM-C loss.
- reduction (str): Options are "none", "mean" and "sum".
- Defaults to "mean"
- """
-
- def __init__(self,
- bins=10,
- momentum=0,
- use_sigmoid=True,
- loss_weight=1.0,
- reduction='mean'):
- super(GHMC, self).__init__()
- self.bins = bins
- self.momentum = momentum
- edges = torch.arange(bins + 1).float() / bins
- self.register_buffer('edges', edges)
- self.edges[-1] += 1e-6
- if momentum > 0:
- acc_sum = torch.zeros(bins)
- self.register_buffer('acc_sum', acc_sum)
- self.use_sigmoid = use_sigmoid
- if not self.use_sigmoid:
- raise NotImplementedError
- self.loss_weight = loss_weight
- self.reduction = reduction
-
- def forward(self,
- pred,
- target,
- label_weight,
- reduction_override=None,
- **kwargs):
- """Calculate the GHM-C loss.
-
- Args:
- pred (float tensor of size [batch_num, class_num]):
- The direct prediction of classification fc layer.
- target (float tensor of size [batch_num, class_num]):
- Binary class target for each sample.
- label_weight (float tensor of size [batch_num, class_num]):
- the value is 1 if the sample is valid and 0 if ignored.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None.
- Returns:
- The gradient harmonized loss.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- # the target should be binary class label
- if pred.dim() != target.dim():
- target, label_weight = _expand_onehot_labels(
- target, label_weight, pred.size(-1))
- target, label_weight = target.float(), label_weight.float()
- edges = self.edges
- mmt = self.momentum
- weights = torch.zeros_like(pred)
-
- # gradient length
- g = torch.abs(pred.sigmoid().detach() - target)
-
- valid = label_weight > 0
- tot = max(valid.float().sum().item(), 1.0)
- n = 0 # n valid bins
- for i in range(self.bins):
- inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
- num_in_bin = inds.sum().item()
- if num_in_bin > 0:
- if mmt > 0:
- self.acc_sum[i] = mmt * self.acc_sum[i] \
- + (1 - mmt) * num_in_bin
- weights[inds] = tot / self.acc_sum[i]
- else:
- weights[inds] = tot / num_in_bin
- n += 1
- if n > 0:
- weights = weights / n
-
- loss = F.binary_cross_entropy_with_logits(
- pred, target, reduction='none')
- loss = weight_reduce_loss(
- loss, weights, reduction=reduction, avg_factor=tot)
- return loss * self.loss_weight
-
-
-# TODO: code refactoring to make it consistent with other losses
-@MODELS.register_module()
-class GHMR(nn.Module):
- """GHM Regression Loss.
-
- Details of the theorem can be viewed in the paper
-    `Gradient Harmonized Single-stage Detector
-    <https://arxiv.org/abs/1811.05181>`_.
-
- Args:
- mu (float): The parameter for the Authentic Smooth L1 loss.
- bins (int): Number of the unit regions for distribution calculation.
- momentum (float): The parameter for moving average.
- loss_weight (float): The weight of the total GHM-R loss.
- reduction (str): Options are "none", "mean" and "sum".
- Defaults to "mean"
- """
-
- def __init__(self,
- mu=0.02,
- bins=10,
- momentum=0,
- loss_weight=1.0,
- reduction='mean'):
- super(GHMR, self).__init__()
- self.mu = mu
- self.bins = bins
- edges = torch.arange(bins + 1).float() / bins
- self.register_buffer('edges', edges)
- self.edges[-1] = 1e3
- self.momentum = momentum
- if momentum > 0:
- acc_sum = torch.zeros(bins)
- self.register_buffer('acc_sum', acc_sum)
- self.loss_weight = loss_weight
- self.reduction = reduction
-
- # TODO: support reduction parameter
- def forward(self,
- pred,
- target,
- label_weight,
- avg_factor=None,
- reduction_override=None):
- """Calculate the GHM-R loss.
-
- Args:
- pred (float tensor of size [batch_num, 4 (* class_num)]):
- The prediction of box regression layer. Channel number can be 4
- or 4 * class_num depending on whether it is class-agnostic.
- target (float tensor of size [batch_num, 4 (* class_num)]):
- The target regression values with the same size of pred.
- label_weight (float tensor of size [batch_num, 4 (* class_num)]):
- The weight of each sample, 0 if ignored.
- reduction_override (str, optional): The reduction method used to
- override the original reduction method of the loss.
- Defaults to None.
- Returns:
- The gradient harmonized loss.
- """
- assert reduction_override in (None, 'none', 'mean', 'sum')
- reduction = (
- reduction_override if reduction_override else self.reduction)
- mu = self.mu
- edges = self.edges
- mmt = self.momentum
-
- # ASL1 loss
- diff = pred - target
- loss = torch.sqrt(diff * diff + mu * mu) - mu
-
- # gradient length
- g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
- weights = torch.zeros_like(g)
-
- valid = label_weight > 0
- tot = max(label_weight.float().sum().item(), 1.0)
- n = 0 # n: valid bins
- for i in range(self.bins):
- inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
- num_in_bin = inds.sum().item()
- if num_in_bin > 0:
- n += 1
- if mmt > 0:
- self.acc_sum[i] = mmt * self.acc_sum[i] \
- + (1 - mmt) * num_in_bin
- weights[inds] = tot / self.acc_sum[i]
- else:
- weights[inds] = tot / num_in_bin
- if n > 0:
- weights /= n
- loss = weight_reduce_loss(
- loss, weights, reduction=reduction, avg_factor=tot)
- return loss * self.loss_weight
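A standalone toy reproduction of the momentum-free branch of `GHMC.forward` above, without the mmdet registry or `weight_reduce_loss`: examples are binned by gradient length `g = |sigmoid(pred) - target|`, each valid example is weighted by `tot / num_in_bin`, and the weights are averaged over the non-empty bins.

```python
import torch
import torch.nn.functional as F

pred = torch.randn(6, 3)
target = torch.randint(0, 2, (6, 3)).float()
label_weight = torch.ones(6, 3)

bins = 10
edges = torch.arange(bins + 1).float() / bins
edges[-1] += 1e-6

g = torch.abs(pred.sigmoid() - target)      # gradient length per example
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
weights = torch.zeros_like(pred)
n = 0                                       # number of non-empty bins
for i in range(bins):
    inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
    num_in_bin = inds.sum().item()
    if num_in_bin > 0:
        weights[inds] = tot / num_in_bin    # down-weight crowded bins
        n += 1
if n > 0:
    weights = weights / n

loss = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
print((loss * weights).sum() / tot)         # same as reducing with avg_factor=tot
```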
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/bbox_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/bbox_head.py
deleted file mode 100644
index db9377990201d9f59d6d683f6496610d9aebf3f8..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/bbox_head.py
+++ /dev/null
@@ -1,708 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List, Optional, Tuple, Union
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from mmengine.config import ConfigDict
-from mmengine.model import BaseModule
-from mmengine.structures import InstanceData
-from torch import Tensor
-from torch.nn.modules.utils import _pair
-
-from mmdet.models.layers import multiclass_nms
-from mmdet.models.losses import accuracy
-from mmdet.models.task_modules.samplers import SamplingResult
-from mmdet.models.utils import empty_instances, multi_apply
-from mmdet.registry import MODELS, TASK_UTILS
-from mmdet.structures.bbox import get_box_tensor, scale_boxes
-from mmdet.utils import ConfigType, InstanceList, OptMultiConfig
-
-
-@MODELS.register_module()
-class BBoxHead(BaseModule):
- """Simplest RoI head, with only two fc layers for classification and
- regression respectively."""
-
- def __init__(self,
- with_avg_pool: bool = False,
- with_cls: bool = True,
- with_reg: bool = True,
- roi_feat_size: int = 7,
- in_channels: int = 256,
- num_classes: int = 80,
- bbox_coder: ConfigType = dict(
- type='DeltaXYWHBBoxCoder',
- clip_border=True,
- target_means=[0., 0., 0., 0.],
- target_stds=[0.1, 0.1, 0.2, 0.2]),
- predict_box_type: str = 'hbox',
- reg_class_agnostic: bool = False,
- reg_decoded_bbox: bool = False,
- reg_predictor_cfg: ConfigType = dict(type='mmdet.Linear'),
- cls_predictor_cfg: ConfigType = dict(type='mmdet.Linear'),
- loss_cls: ConfigType = dict(
- type='CrossEntropyLoss',
- use_sigmoid=False,
- loss_weight=1.0),
- loss_bbox: ConfigType = dict(
- type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
- init_cfg: OptMultiConfig = None) -> None:
- super().__init__(init_cfg=init_cfg)
- assert with_cls or with_reg
- self.with_avg_pool = with_avg_pool
- self.with_cls = with_cls
- self.with_reg = with_reg
- self.roi_feat_size = _pair(roi_feat_size)
- self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
- self.in_channels = in_channels
- self.num_classes = num_classes
- self.predict_box_type = predict_box_type
- self.reg_class_agnostic = reg_class_agnostic
- self.reg_decoded_bbox = reg_decoded_bbox
- self.reg_predictor_cfg = reg_predictor_cfg
- self.cls_predictor_cfg = cls_predictor_cfg
-
- self.bbox_coder = TASK_UTILS.build(bbox_coder)
- self.loss_cls = MODELS.build(loss_cls)
- self.loss_bbox = MODELS.build(loss_bbox)
-
- in_channels = self.in_channels
- if self.with_avg_pool:
- self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
- else:
- in_channels *= self.roi_feat_area
- if self.with_cls:
- # need to add background class
- if self.custom_cls_channels:
- cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
- else:
- cls_channels = num_classes + 1
- cls_predictor_cfg_ = self.cls_predictor_cfg.copy()
- cls_predictor_cfg_.update(
- in_features=in_channels, out_features=cls_channels)
- self.fc_cls = MODELS.build(cls_predictor_cfg_)
- if self.with_reg:
- box_dim = self.bbox_coder.encode_size
- out_dim_reg = box_dim if reg_class_agnostic else \
- box_dim * num_classes
- reg_predictor_cfg_ = self.reg_predictor_cfg.copy()
- if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):
- reg_predictor_cfg_.update(
- in_features=in_channels, out_features=out_dim_reg)
- self.fc_reg = MODELS.build(reg_predictor_cfg_)
- self.debug_imgs = None
- if init_cfg is None:
- self.init_cfg = []
- if self.with_cls:
- self.init_cfg += [
- dict(
- type='Normal', std=0.01, override=dict(name='fc_cls'))
- ]
- if self.with_reg:
- self.init_cfg += [
- dict(
- type='Normal', std=0.001, override=dict(name='fc_reg'))
- ]
-
-    # TODO: Create a SeesawBBoxHead to simplify the logic in BBoxHead
- @property
- def custom_cls_channels(self) -> bool:
- """get custom_cls_channels from loss_cls."""
- return getattr(self.loss_cls, 'custom_cls_channels', False)
-
-    # TODO: Create a SeesawBBoxHead to simplify the logic in BBoxHead
- @property
- def custom_activation(self) -> bool:
- """get custom_activation from loss_cls."""
- return getattr(self.loss_cls, 'custom_activation', False)
-
-    # TODO: Create a SeesawBBoxHead to simplify the logic in BBoxHead
- @property
- def custom_accuracy(self) -> bool:
- """get custom_accuracy from loss_cls."""
- return getattr(self.loss_cls, 'custom_accuracy', False)
-
- def forward(self, x: Tuple[Tensor]) -> tuple:
- """Forward features from the upstream network.
-
- Args:
- x (tuple[Tensor]): Features from the upstream network, each is
- a 4D-tensor.
-
- Returns:
- tuple: A tuple of classification scores and bbox prediction.
-
- - cls_score (Tensor): Classification scores for all
- scale levels, each is a 4D-tensor, the channels number
- is num_base_priors * num_classes.
- - bbox_pred (Tensor): Box energies / deltas for all
- scale levels, each is a 4D-tensor, the channels number
- is num_base_priors * 4.
- """
- if self.with_avg_pool:
- if x.numel() > 0:
- x = self.avg_pool(x)
- x = x.view(x.size(0), -1)
- else:
-                # avg_pool does not support empty tensors,
-                # so use torch.mean instead
- x = torch.mean(x, dim=(-1, -2))
- cls_score = self.fc_cls(x) if self.with_cls else None
- bbox_pred = self.fc_reg(x) if self.with_reg else None
- return cls_score, bbox_pred
-
- def _get_targets_single(self, pos_priors: Tensor, neg_priors: Tensor,
- pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,
- cfg: ConfigDict) -> tuple:
- """Calculate the ground truth for proposals in the single image
- according to the sampling results.
-
- Args:
- pos_priors (Tensor): Contains all the positive boxes,
- has shape (num_pos, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- neg_priors (Tensor): Contains all the negative boxes,
- has shape (num_neg, 4), the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- pos_gt_bboxes (Tensor): Contains gt_boxes for
- all positive samples, has shape (num_pos, 4),
- the last dimension 4
- represents [tl_x, tl_y, br_x, br_y].
- pos_gt_labels (Tensor): Contains gt_labels for
- all positive samples, has shape (num_pos, ).
- cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
-
- Returns:
- Tuple[Tensor]: Ground truth for proposals
- in a single image. Containing the following Tensors:
-
- - labels(Tensor): Gt_labels for all proposals, has
- shape (num_proposals,).
- - label_weights(Tensor): Labels_weights for all
- proposals, has shape (num_proposals,).
- - bbox_targets(Tensor):Regression target for all
- proposals, has shape (num_proposals, 4), the
- last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- - bbox_weights(Tensor):Regression weights for all
- proposals, has shape (num_proposals, 4).
- """
- num_pos = pos_priors.size(0)
- num_neg = neg_priors.size(0)
- num_samples = num_pos + num_neg
-
- # original implementation uses new_zeros since BG are set to be 0
- # now use empty & fill because BG cat_id = num_classes,
- # FG cat_id = [0, num_classes-1]
- labels = pos_priors.new_full((num_samples, ),
- self.num_classes,
- dtype=torch.long)
- reg_dim = pos_gt_bboxes.size(-1) if self.reg_decoded_bbox \
- else self.bbox_coder.encode_size
- label_weights = pos_priors.new_zeros(num_samples)
- bbox_targets = pos_priors.new_zeros(num_samples, reg_dim)
- bbox_weights = pos_priors.new_zeros(num_samples, reg_dim)
- if num_pos > 0:
- labels[:num_pos] = pos_gt_labels
- pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
- label_weights[:num_pos] = pos_weight
- if not self.reg_decoded_bbox:
- pos_bbox_targets = self.bbox_coder.encode(
- pos_priors, pos_gt_bboxes)
- else:
- # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
- # is applied directly on the decoded bounding boxes, both
- # the predicted boxes and regression targets should be with
- # absolute coordinate format.
- pos_bbox_targets = get_box_tensor(pos_gt_bboxes)
- bbox_targets[:num_pos, :] = pos_bbox_targets
- bbox_weights[:num_pos, :] = 1
- if num_neg > 0:
- label_weights[-num_neg:] = 1.0
-
- return labels, label_weights, bbox_targets, bbox_weights
-
- def get_targets(self,
- sampling_results: List[SamplingResult],
- rcnn_train_cfg: ConfigDict,
- concat: bool = True) -> tuple:
- """Calculate the ground truth for all samples in a batch according to
- the sampling_results.
-
-        Almost the same as the implementation in bbox_head; we pass the
-        additional parameters pos_inds_list and neg_inds_list to the
-        `_get_targets_single` function.
-
- Args:
- sampling_results (List[obj:SamplingResult]): Assign results of
- all images in a batch after sampling.
- rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
- concat (bool): Whether to concatenate the results of all
- the images in a single batch.
-
- Returns:
- Tuple[Tensor]: Ground truth for proposals in a single image.
- Containing the following list of Tensors:
-
- - labels (list[Tensor],Tensor): Gt_labels for all
- proposals in a batch, each tensor in list has
- shape (num_proposals,) when `concat=False`, otherwise
- just a single tensor has shape (num_all_proposals,).
- - label_weights (list[Tensor]): Labels_weights for
- all proposals in a batch, each tensor in list has
- shape (num_proposals,) when `concat=False`, otherwise
- just a single tensor has shape (num_all_proposals,).
- - bbox_targets (list[Tensor],Tensor): Regression target
- for all proposals in a batch, each tensor in list
- has shape (num_proposals, 4) when `concat=False`,
- otherwise just a single tensor has shape
- (num_all_proposals, 4), the last dimension 4 represents
- [tl_x, tl_y, br_x, br_y].
- - bbox_weights (list[tensor],Tensor): Regression weights for
- all proposals in a batch, each tensor in list has shape
- (num_proposals, 4) when `concat=False`, otherwise just a
- single tensor has shape (num_all_proposals, 4).
- """
- pos_priors_list = [res.pos_priors for res in sampling_results]
- neg_priors_list = [res.neg_priors for res in sampling_results]
- pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
- pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
- labels, label_weights, bbox_targets, bbox_weights = multi_apply(
- self._get_targets_single,
- pos_priors_list,
- neg_priors_list,
- pos_gt_bboxes_list,
- pos_gt_labels_list,
- cfg=rcnn_train_cfg)
-
- if concat:
- labels = torch.cat(labels, 0)
- label_weights = torch.cat(label_weights, 0)
- bbox_targets = torch.cat(bbox_targets, 0)
- bbox_weights = torch.cat(bbox_weights, 0)
- return labels, label_weights, bbox_targets, bbox_weights
-
- def loss_and_target(self,
- cls_score: Tensor,
- bbox_pred: Tensor,
- rois: Tensor,
- sampling_results: List[SamplingResult],
- rcnn_train_cfg: ConfigDict,
- concat: bool = True,
- reduction_override: Optional[str] = None) -> dict:
- """Calculate the loss based on the features extracted by the bbox head.
-
- Args:
- cls_score (Tensor): Classification prediction
- results of all class, has shape
- (batch_size * num_proposals_single_image, num_classes)
- bbox_pred (Tensor): Regression prediction results,
- has shape
- (batch_size * num_proposals_single_image, 4), the last
- dimension 4 represents [tl_x, tl_y, br_x, br_y].
- rois (Tensor): RoIs with the shape
- (batch_size * num_proposals_single_image, 5) where the first
- column indicates batch id of each RoI.
- sampling_results (List[obj:SamplingResult]): Assign results of
- all images in a batch after sampling.
- rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
- concat (bool): Whether to concatenate the results of all
- the images in a single batch. Defaults to True.
- reduction_override (str, optional): The reduction
- method used to override the original reduction
- method of the loss. Options are "none",
- "mean" and "sum". Defaults to None,
-
- Returns:
- dict: A dictionary of loss and targets components.
- The targets are only used for cascade rcnn.
- """
-
- cls_reg_targets = self.get_targets(
- sampling_results, rcnn_train_cfg, concat=concat)
- losses = self.loss(
- cls_score,
- bbox_pred,
- rois,
- *cls_reg_targets,
- reduction_override=reduction_override)
-
- # cls_reg_targets is only for cascade rcnn
- return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)
-
- def loss(self,
- cls_score: Tensor,
- bbox_pred: Tensor,
- rois: Tensor,
- labels: Tensor,
- label_weights: Tensor,
- bbox_targets: Tensor,
- bbox_weights: Tensor,
- reduction_override: Optional[str] = None) -> dict:
- """Calculate the loss based on the network predictions and targets.
-
- Args:
- cls_score (Tensor): Classification prediction
- results of all class, has shape
- (batch_size * num_proposals_single_image, num_classes)
- bbox_pred (Tensor): Regression prediction results,
- has shape
- (batch_size * num_proposals_single_image, 4), the last
- dimension 4 represents [tl_x, tl_y, br_x, br_y].
- rois (Tensor): RoIs with the shape
- (batch_size * num_proposals_single_image, 5) where the first
- column indicates batch id of each RoI.
- labels (Tensor): Gt_labels for all proposals in a batch, has
- shape (batch_size * num_proposals_single_image, ).
- label_weights (Tensor): Labels_weights for all proposals in a
- batch, has shape (batch_size * num_proposals_single_image, ).
- bbox_targets (Tensor): Regression target for all proposals in a
- batch, has shape (batch_size * num_proposals_single_image, 4),
- the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- bbox_weights (Tensor): Regression weights for all proposals in a
- batch, has shape (batch_size * num_proposals_single_image, 4).
- reduction_override (str, optional): The reduction
- method used to override the original reduction
- method of the loss. Options are "none",
- "mean" and "sum". Defaults to None,
-
- Returns:
- dict: A dictionary of loss.
- """
-
- losses = dict()
-
- if cls_score is not None:
- avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
- if cls_score.numel() > 0:
- loss_cls_ = self.loss_cls(
- cls_score,
- labels,
- label_weights,
- avg_factor=avg_factor,
- reduction_override=reduction_override)
- if isinstance(loss_cls_, dict):
- losses.update(loss_cls_)
- else:
- losses['loss_cls'] = loss_cls_
- if self.custom_activation:
- acc_ = self.loss_cls.get_accuracy(cls_score, labels)
- losses.update(acc_)
- else:
- losses['acc'] = accuracy(cls_score, labels)
- if bbox_pred is not None:
- bg_class_ind = self.num_classes
- # 0~self.num_classes-1 are FG, self.num_classes is BG
- pos_inds = (labels >= 0) & (labels < bg_class_ind)
- # do not perform bounding box regression for BG anymore.
- if pos_inds.any():
- if self.reg_decoded_bbox:
- # When the regression loss (e.g. `IouLoss`,
- # `GIouLoss`, `DIouLoss`) is applied directly on
- # the decoded bounding boxes, it decodes the
- # already encoded coordinates to absolute format.
- bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
- bbox_pred = get_box_tensor(bbox_pred)
- if self.reg_class_agnostic:
- pos_bbox_pred = bbox_pred.view(
- bbox_pred.size(0), -1)[pos_inds.type(torch.bool)]
- else:
- pos_bbox_pred = bbox_pred.view(
- bbox_pred.size(0), self.num_classes,
- -1)[pos_inds.type(torch.bool),
- labels[pos_inds.type(torch.bool)]]
- losses['loss_bbox'] = self.loss_bbox(
- pos_bbox_pred,
- bbox_targets[pos_inds.type(torch.bool)],
- bbox_weights[pos_inds.type(torch.bool)],
- avg_factor=bbox_targets.size(0),
- reduction_override=reduction_override)
- else:
- losses['loss_bbox'] = bbox_pred[pos_inds].sum()
-
- return losses
-
- def predict_by_feat(self,
- rois: Tuple[Tensor],
- cls_scores: Tuple[Tensor],
- bbox_preds: Tuple[Tensor],
- batch_img_metas: List[dict],
- rcnn_test_cfg: Optional[ConfigDict] = None,
- rescale: bool = False) -> InstanceList:
- """Transform a batch of output features extracted from the head into
- bbox results.
-
- Args:
- rois (tuple[Tensor]): Tuple of boxes to be transformed.
- Each has shape (num_boxes, 5). last dimension 5 arrange as
- (batch_index, x1, y1, x2, y2).
- cls_scores (tuple[Tensor]): Tuple of box scores, each has shape
- (num_boxes, num_classes + 1).
- bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each
- has shape (num_boxes, num_classes * 4).
- batch_img_metas (list[dict]): List of image information.
- rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.
- Defaults to None.
- rescale (bool): If True, return boxes in original image space.
- Defaults to False.
-
- Returns:
- list[:obj:`InstanceData`]: Instance segmentation
- results of each image after the post process.
- Each item usually contains following keys.
-
- - scores (Tensor): Classification scores, has a shape
- (num_instance, )
- - labels (Tensor): Labels of bboxes, has a shape
- (num_instances, ).
- - bboxes (Tensor): Has a shape (num_instances, 4),
- the last dimension 4 arrange as (x1, y1, x2, y2).
- """
- assert len(cls_scores) == len(bbox_preds)
- result_list = []
- for img_id in range(len(batch_img_metas)):
- img_meta = batch_img_metas[img_id]
- results = self._predict_by_feat_single(
- roi=rois[img_id],
- cls_score=cls_scores[img_id],
- bbox_pred=bbox_preds[img_id],
- img_meta=img_meta,
- rescale=rescale,
- rcnn_test_cfg=rcnn_test_cfg)
- result_list.append(results)
-
- return result_list
-
- def _predict_by_feat_single(
- self,
- roi: Tensor,
- cls_score: Tensor,
- bbox_pred: Tensor,
- img_meta: dict,
- rescale: bool = False,
- rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
- """Transform a single image's features extracted from the head into
- bbox results.
-
- Args:
- roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
- last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
- cls_score (Tensor): Box scores, has shape
- (num_boxes, num_classes + 1).
- bbox_pred (Tensor): Box energies / deltas.
- has shape (num_boxes, num_classes * 4).
- img_meta (dict): image information.
- rescale (bool): If True, return boxes in original image space.
- Defaults to False.
- rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
- Defaults to None
-
- Returns:
- :obj:`InstanceData`: Detection results of each image\
- Each item usually contains following keys.
-
- - scores (Tensor): Classification scores, has a shape
- (num_instance, )
- - labels (Tensor): Labels of bboxes, has a shape
- (num_instances, ).
- - bboxes (Tensor): Has a shape (num_instances, 4),
- the last dimension 4 arrange as (x1, y1, x2, y2).
- """
- results = InstanceData()
- if roi.shape[0] == 0:
- return empty_instances([img_meta],
- roi.device,
- task_type='bbox',
- instance_results=[results],
- box_type=self.predict_box_type,
- use_box_type=False,
- num_classes=self.num_classes,
- score_per_cls=rcnn_test_cfg is None)[0]
-
- # some loss (Seesaw loss..) may have custom activation
- if self.custom_cls_channels:
- scores = self.loss_cls.get_activation(cls_score)
- else:
- scores = F.softmax(
- cls_score, dim=-1) if cls_score is not None else None
-
- img_shape = img_meta['img_shape']
- num_rois = roi.size(0)
- # bbox_pred would be None in some detector when with_reg is False,
- # e.g. Grid R-CNN.
- if bbox_pred is not None:
- num_classes = 1 if self.reg_class_agnostic else self.num_classes
- roi = roi.repeat_interleave(num_classes, dim=0)
- bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size)
- bboxes = self.bbox_coder.decode(
- roi[..., 1:], bbox_pred, max_shape=img_shape)
- else:
- bboxes = roi[:, 1:].clone()
- if img_shape is not None and bboxes.size(-1) == 4:
- bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
- bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
-
- if rescale and bboxes.size(0) > 0:
- assert img_meta.get('scale_factor') is not None
- scale_factor = [1 / s for s in img_meta['scale_factor']]
- bboxes = scale_boxes(bboxes, scale_factor)
-
- # Get the inside tensor when `bboxes` is a box type
- bboxes = get_box_tensor(bboxes)
- box_dim = bboxes.size(-1)
- bboxes = bboxes.view(num_rois, -1)
-
- if rcnn_test_cfg is None:
- # This means that it is aug test.
- # It needs to return the raw results without nms.
- results.bboxes = bboxes
- results.scores = scores
- else:
- det_bboxes, det_labels = multiclass_nms(
- bboxes,
- scores,
- rcnn_test_cfg.score_thr,
- rcnn_test_cfg.nms,
- rcnn_test_cfg.max_per_img,
- box_dim=box_dim)
- results.bboxes = det_bboxes[:, :-1]
- results.scores = det_bboxes[:, -1]
- results.labels = det_labels
- return results
-
- def refine_bboxes(self, sampling_results: Union[List[SamplingResult],
- InstanceList],
- bbox_results: dict,
- batch_img_metas: List[dict]) -> InstanceList:
- """Refine bboxes during training.
-
- Args:
- sampling_results (List[:obj:`SamplingResult`] or
- List[:obj:`InstanceData`]): Sampling results.
- :obj:`SamplingResult` is the real sampling results
- calculate from bbox_head, while :obj:`InstanceData` is
- fake sampling results, e.g., in Sparse R-CNN or QueryInst, etc.
- bbox_results (dict): Usually is a dictionary with keys:
-
- - `cls_score` (Tensor): Classification scores.
- - `bbox_pred` (Tensor): Box energies / deltas.
- - `rois` (Tensor): RoIs with the shape (n, 5) where the first
- column indicates batch id of each RoI.
- - `bbox_targets` (tuple): Ground truth for proposals in a
- single image. Containing the following list of Tensors:
- (labels, label_weights, bbox_targets, bbox_weights)
- batch_img_metas (List[dict]): List of image information.
-
- Returns:
- list[:obj:`InstanceData`]: Refined bboxes of each image.
-
- Example:
- >>> # xdoctest: +REQUIRES(module:kwarray)
- >>> import numpy as np
- >>> from mmdet.models.task_modules.samplers.
- ... sampling_result import random_boxes
- >>> from mmdet.models.task_modules.samplers import SamplingResult
- >>> self = BBoxHead(reg_class_agnostic=True)
- >>> n_roi = 2
- >>> n_img = 4
- >>> scale = 512
- >>> rng = np.random.RandomState(0)
-            >>> batch_img_metas = [{'img_shape': (scale, scale)}
-            ...                    for _ in range(n_img)]
- >>> sampling_results = [SamplingResult.random(rng=10)
- ... for _ in range(n_img)]
- >>> # Create rois in the expected format
- >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
- >>> img_ids = torch.randint(0, n_img, (n_roi,))
- >>> img_ids = img_ids.float()
- >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
- >>> # Create other args
- >>> labels = torch.randint(0, 81, (scale,)).long()
- >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
- >>> cls_score = torch.randn((scale, 81))
-            >>> # For each image, pretend random positive boxes are gts
-            >>> bbox_targets = (labels, None, None, None)
-            >>> bbox_results = dict(rois=rois, bbox_pred=bbox_preds,
-            ...                     cls_score=cls_score,
-            ...                     bbox_targets=bbox_targets)
- >>> bboxes_list = self.refine_bboxes(sampling_results,
- ... bbox_results,
- ... batch_img_metas)
- >>> print(bboxes_list)
- """
- pos_is_gts = [res.pos_is_gt for res in sampling_results]
- # bbox_targets is a tuple
- labels = bbox_results['bbox_targets'][0]
- cls_scores = bbox_results['cls_score']
- rois = bbox_results['rois']
- bbox_preds = bbox_results['bbox_pred']
- if self.custom_activation:
-            # TODO: Create a SeesawBBoxHead to simplify the logic in BBoxHead
- cls_scores = self.loss_cls.get_activation(cls_scores)
- if cls_scores.numel() == 0:
- return None
- if cls_scores.shape[-1] == self.num_classes + 1:
- # remove background class
- cls_scores = cls_scores[:, :-1]
- elif cls_scores.shape[-1] != self.num_classes:
- raise ValueError('The last dim of `cls_scores` should equal to '
- '`num_classes` or `num_classes + 1`,'
- f'but got {cls_scores.shape[-1]}.')
- labels = torch.where(labels == self.num_classes, cls_scores.argmax(1),
- labels)
-
- img_ids = rois[:, 0].long().unique(sorted=True)
- assert img_ids.numel() <= len(batch_img_metas)
-
- results_list = []
- for i in range(len(batch_img_metas)):
- inds = torch.nonzero(
- rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
- num_rois = inds.numel()
-
- bboxes_ = rois[inds, 1:]
- label_ = labels[inds]
- bbox_pred_ = bbox_preds[inds]
- img_meta_ = batch_img_metas[i]
- pos_is_gts_ = pos_is_gts[i]
-
- bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
- img_meta_)
- # filter gt bboxes
- pos_keep = 1 - pos_is_gts_
- keep_inds = pos_is_gts_.new_ones(num_rois)
- keep_inds[:len(pos_is_gts_)] = pos_keep
- results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])
- results_list.append(results)
-
- return results_list
-
- def regress_by_class(self, priors: Tensor, label: Tensor,
- bbox_pred: Tensor, img_meta: dict) -> Tensor:
- """Regress the bbox for the predicted class. Used in Cascade R-CNN.
-
- Args:
- priors (Tensor): Priors from `rpn_head` or last stage
- `bbox_head`, has shape (num_proposals, 4).
- label (Tensor): Only used when `self.reg_class_agnostic`
- is False, has shape (num_proposals, ).
- bbox_pred (Tensor): Regression prediction of
- current stage `bbox_head`. When `self.reg_class_agnostic`
- is False, it has shape (n, num_classes * 4), otherwise
- it has shape (n, 4).
- img_meta (dict): Image meta info.
-
- Returns:
- Tensor: Regressed bboxes, the same shape as input rois.
- """
- reg_dim = self.bbox_coder.encode_size
- if not self.reg_class_agnostic:
- label = label * reg_dim
- inds = torch.stack([label + i for i in range(reg_dim)], 1)
- bbox_pred = torch.gather(bbox_pred, 1, inds)
- assert bbox_pred.size()[1] == reg_dim
-
- max_shape = img_meta['img_shape']
- regressed_bboxes = self.bbox_coder.decode(
- priors, bbox_pred, max_shape=max_shape)
- return regressed_bboxes
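A standalone illustration of the per-class gather in `regress_by_class` above: with class-specific regression the head predicts `encode_size` deltas per class, and `torch.gather` picks the deltas belonging to each proposal's predicted class (the shapes here are toy values).

```python
import torch

num_proposals, num_classes, reg_dim = 5, 3, 4
bbox_pred = torch.randn(num_proposals, num_classes * reg_dim)
label = torch.randint(0, num_classes, (num_proposals,))

# Column indices of the reg_dim deltas for each proposal's class.
inds = torch.stack([label * reg_dim + i for i in range(reg_dim)], dim=1)  # N x 4
picked = torch.gather(bbox_pred, 1, inds)
assert picked.shape == (num_proposals, reg_dim)
```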
diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/ema_hook.py b/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/ema_hook.py
deleted file mode 100644
index 284d211b628c411f0eb712d1c558dc6aa2eb8996..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmpretrain/engine/hooks/ema_hook.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-import itertools
-import warnings
-from typing import Dict, Optional
-
-from mmengine.hooks import EMAHook as BaseEMAHook
-from mmengine.logging import MMLogger
-from mmengine.runner import Runner
-
-from mmpretrain.registry import HOOKS
-
-
-@HOOKS.register_module()
-class EMAHook(BaseEMAHook):
- """A Hook to apply Exponential Moving Average (EMA) on the model during
- training.
-
- Comparing with :class:`mmengine.hooks.EMAHook`, this hook accepts
- ``evaluate_on_ema`` and ``evaluate_on_origin`` arguments. By default, the
- ``evaluate_on_ema`` is enabled, and if you want to do validation and
- testing on both original and EMA models, please set both arguments
- ``True``.
-
- Note:
- - EMAHook takes priority over CheckpointHook.
- - The original model parameters are actually saved in ema field after
- train.
- - ``begin_iter`` and ``begin_epoch`` cannot be set at the same time.
-
- Args:
- ema_type (str): The type of EMA strategy to use. You can find the
- supported strategies in :mod:`mmengine.model.averaged_model`.
- Defaults to 'ExponentialMovingAverage'.
- strict_load (bool): Whether to strictly enforce that the keys of
- ``state_dict`` in checkpoint match the keys returned by
- ``self.module.state_dict``. Defaults to False.
- Changed in v0.3.0.
- begin_iter (int): The number of iteration to enable ``EMAHook``.
- Defaults to 0.
- begin_epoch (int): The number of epoch to enable ``EMAHook``.
- Defaults to 0.
- evaluate_on_ema (bool): Whether to evaluate (validate and test)
- on EMA model during val-loop and test-loop. Defaults to True.
- evaluate_on_origin (bool): Whether to evaluate (validate and test)
- on the original model during val-loop and test-loop.
- Defaults to False.
- **kwargs: Keyword arguments passed to subclasses of
- :obj:`BaseAveragedModel`
- """
-
- priority = 'NORMAL'
-
- def __init__(self,
- ema_type: str = 'ExponentialMovingAverage',
- strict_load: bool = False,
- begin_iter: int = 0,
- begin_epoch: int = 0,
- evaluate_on_ema: bool = True,
- evaluate_on_origin: bool = False,
- **kwargs):
- super().__init__(
- ema_type=ema_type,
- strict_load=strict_load,
- begin_iter=begin_iter,
- begin_epoch=begin_epoch,
- **kwargs)
-
- if not evaluate_on_ema and not evaluate_on_origin:
- warnings.warn(
- 'Automatically set `evaluate_on_origin=True` since the '
- '`evaluate_on_ema` is disabled. If you want to disable '
- 'all validation, please modify the `val_interval` of '
- 'the `train_cfg`.', UserWarning)
- evaluate_on_origin = True
-
- self.evaluate_on_ema = evaluate_on_ema
- self.evaluate_on_origin = evaluate_on_origin
- self.load_ema_from_ckpt = False
-
- def before_train(self, runner) -> None:
- super().before_train(runner)
- if not runner._resume and self.load_ema_from_ckpt:
- # If loaded EMA state dict but not want to resume training
- # overwrite the EMA state dict with the source model.
- MMLogger.get_current_instance().info(
- 'Load from a checkpoint with EMA parameters but not '
- 'resume training. Initialize the model parameters with '
- 'EMA parameters')
- for p_ema, p_src in zip(self._ema_params, self._src_params):
- p_src.data.copy_(p_ema.data)
-
- def before_val_epoch(self, runner) -> None:
- """We load parameter values from ema model to source model before
- validation.
-
- Args:
- runner (Runner): The runner of the training process.
- """
- if self.evaluate_on_ema:
- # Swap when evaluate on ema
- self._swap_ema_parameters()
-
- def after_val_epoch(self,
- runner,
- metrics: Optional[Dict[str, float]] = None) -> None:
- """We recover source model's parameter from ema model after validation.
-
- Args:
- runner (Runner): The runner of the validation process.
- metrics (Dict[str, float], optional): Evaluation results of all
- metrics on validation dataset. The keys are the names of the
- metrics, and the values are corresponding results.
- """
- if self.evaluate_on_ema:
- # Swap when evaluate on ema
- self._swap_ema_parameters()
-
- if self.evaluate_on_ema and self.evaluate_on_origin:
- # Re-evaluate if evaluate on both ema and origin.
- val_loop = runner.val_loop
-
- runner.model.eval()
- for idx, data_batch in enumerate(val_loop.dataloader):
- val_loop.run_iter(idx, data_batch)
-
- # compute metrics
- origin_metrics = val_loop.evaluator.evaluate(
- len(val_loop.dataloader.dataset))
-
- for k, v in origin_metrics.items():
- runner.message_hub.update_scalar(f'val/{k}_origin', v)
-
- def before_test_epoch(self, runner) -> None:
- """We load parameter values from ema model to source model before test.
-
- Args:
- runner (Runner): The runner of the training process.
- """
- if self.evaluate_on_ema:
- # Swap when evaluate on ema
- self._swap_ema_parameters()
- MMLogger.get_current_instance().info('Start testing on EMA model.')
- else:
- MMLogger.get_current_instance().info(
- 'Start testing on the original model.')
-
- def after_test_epoch(self,
- runner: Runner,
- metrics: Optional[Dict[str, float]] = None) -> None:
- """We recover source model's parameter from ema model after test.
-
- Args:
- runner (Runner): The runner of the testing process.
- metrics (Dict[str, float], optional): Evaluation results of all
- metrics on test dataset. The keys are the names of the
- metrics, and the values are corresponding results.
- """
- if self.evaluate_on_ema:
- # Swap when evaluate on ema
- self._swap_ema_parameters()
-
- if self.evaluate_on_ema and self.evaluate_on_origin:
- # Re-evaluate if evaluate on both ema and origin.
- MMLogger.get_current_instance().info(
- 'Start testing on the original model.')
- test_loop = runner.test_loop
-
- runner.model.eval()
- for idx, data_batch in enumerate(test_loop.dataloader):
- test_loop.run_iter(idx, data_batch)
-
- # compute metrics
- origin_metrics = test_loop.evaluator.evaluate(
- len(test_loop.dataloader.dataset))
-
- for k, v in origin_metrics.items():
- runner.message_hub.update_scalar(f'test/{k}_origin', v)
-
- def after_load_checkpoint(self, runner, checkpoint: dict) -> None:
- """Resume ema parameters from checkpoint.
-
- Args:
- runner (Runner): The runner of the testing process.
- """
- from mmengine.runner.checkpoint import load_state_dict
- if 'ema_state_dict' in checkpoint:
- # The original model parameters are actually saved in ema
- # field swap the weights back to resume ema state.
- self._swap_ema_state_dict(checkpoint)
- self.ema_model.load_state_dict(
- checkpoint['ema_state_dict'], strict=self.strict_load)
- self.load_ema_from_ckpt = True
-
- # Support load checkpoint without ema state dict.
- else:
- load_state_dict(
- self.ema_model.module,
- copy.deepcopy(checkpoint['state_dict']),
- strict=self.strict_load)
-
- @property
- def _src_params(self):
- if self.ema_model.update_buffers:
- return itertools.chain(self.src_model.parameters(),
- self.src_model.buffers())
- else:
- return self.src_model.parameters()
-
- @property
- def _ema_params(self):
- if self.ema_model.update_buffers:
- return itertools.chain(self.ema_model.module.parameters(),
- self.ema_model.module.buffers())
- else:
- return self.ema_model.module.parameters()
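The hook above delegates the actual averaging to mmengine's averaged model. A minimal sketch of the exponential-moving-average update it builds on; the momentum value and helper function are illustrative, not the mmengine API:

```python
import copy
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
ema_model = copy.deepcopy(model)   # shadow copy holding the averaged weights
momentum = 0.999                   # assumed value; mmengine's default may differ

@torch.no_grad()
def ema_update(ema: nn.Module, src: nn.Module, m: float) -> None:
    # ema <- m * ema + (1 - m) * src, applied after every optimizer step.
    for p_ema, p_src in zip(ema.parameters(), src.parameters()):
        p_ema.mul_(m).add_(p_src, alpha=1 - m)

ema_update(ema_model, model, momentum)
```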
diff --git a/spaces/LearnableAI/FinTextSummaryDemo/model.py b/spaces/LearnableAI/FinTextSummaryDemo/model.py
deleted file mode 100644
index 485b6fc3ae9b9ed63344352c61803cacb3f395b6..0000000000000000000000000000000000000000
--- a/spaces/LearnableAI/FinTextSummaryDemo/model.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""
-    File description:
-    GPT2 model file; mainly a rewrite of GPT2LMHeadModel from the transformers package, modifying the loss computation so that only the loss over the predicted title part is calculated
-"""
-
-from torch.nn import CrossEntropyLoss
-import torch.nn as nn
-from transformers.models.gpt2.modeling_gpt2 import GPT2PreTrainedModel, GPT2Model
-
-
-class GPT2LMHeadModel(GPT2PreTrainedModel):
- """GPT2模型"""
- def __init__(self, config):
- """
-        Initialization function.
-        Args:
-            config: configuration parameters
- """
- super().__init__(config)
- self.transformer = GPT2Model(config)
- self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
- self.init_weights()
-
- def forward(self, input_ids=None, past=None, token_type_ids=None, labels=None, title_id=None):
- """
- 前向函数,计算GPT2预测结果值
- Args:
- input_ids: 输入序列在词表中的索引序列,size:[batch_size, sequence_length]
- past: 包含由模型预先计算好的隐藏状态,一般使用在预测阶段,用于加速顺序解码,防止重复计算前面计算过的token
- token_type_ids: 用于区分输入序列中content和title的分隔符序列,size:[batch_size, sequence_length]
- labels: 标签序列,size:[batch_size, sequence_length],一般情况下,与input_ids相同
- title_id: title部分分隔符的id
- Returns:
-
- """
-        # Get the outputs of the GPT2 model
-        transformer_outputs = self.transformer(input_ids, past_key_values=past, token_type_ids=token_type_ids)
-        # Hidden states of the last GPT2 layer, size: [batch_size, sequence_length, config.n_embd]
-        hidden_states = transformer_outputs[0]
-        # Predict the next token for every position in the hidden states, size: [batch_size, sequence_length, config.vocab_size]
-        lm_logits = self.lm_head(hidden_states)
-        # Assemble the outputs
-        outputs = (lm_logits,) + transformer_outputs[1:]
-        # If labels is not None, compute the loss and prepend it to the outputs
- if labels is not None:
-            # title_id must not be None when computing the loss, because it is needed to locate the title part
-            if title_id is None or token_type_ids is None:
-                raise Exception("当labels不为None时, title_id和token_type_ids均不可以为None。")
-            # Build the mask: positions whose token_type_ids equal title_id contribute to the loss and are marked 1; all others are 0.
-            # size: [batch_size, sequence_length]
-            mask = (token_type_ids == title_id).long()
-            # Get the new labels, size: [batch_size, sequence_length]
-            labels = labels * mask
-            # Shift the predictions and the labels by one position
-            # GPT2 predicts the next token from the preceding tokens, and labels is identical to input_ids,
-            # so the prediction for the first token of input_ids actually corresponds to the second token of the labels, and so on; in the end the loss covers only sequence_length-1 tokens
- shift_logits = lm_logits[..., :-1, :].contiguous()
- shift_labels = labels[..., 1:].contiguous()
-
-            # Define CrossEntropyLoss, set the label index to ignore, and choose how the loss is returned
-            # Positions where shift_labels is 0 are ignored, i.e. only the title part contributes to the loss
-            # The reduction is set to "sum": since only the title part is counted, "mean" would make the loss too small (the divisor would be sequence_length-1 rather than the true title length)
- loss_fct = CrossEntropyLoss(ignore_index=0, reduction="sum")
- loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
-            # Get the true length of the title part and compute the true loss
- num = shift_labels.ne(0).long().sum().item()
- loss = loss / num
- outputs = (loss,) + outputs
- return outputs # (loss), lm_logits, presents, (all hidden_states), (attentions)
-
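A standalone toy run of the title-only loss above: labels outside the title segment are zeroed through the token-type mask, logits and labels are shifted by one position, and the summed loss is divided by the true number of title tokens (shapes and ids are illustrative).

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size, title_id = 11, 1
lm_logits = torch.randn(2, 6, vocab_size)
labels = torch.randint(1, vocab_size, (2, 6))
token_type_ids = torch.tensor([[0, 0, 0, 1, 1, 1],
                               [0, 0, 1, 1, 1, 1]])

mask = (token_type_ids == title_id).long()
labels = labels * mask                              # non-title positions become 0
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()

loss_fct = CrossEntropyLoss(ignore_index=0, reduction="sum")
loss = loss_fct(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
loss = loss / shift_labels.ne(0).long().sum().item()  # mean over title tokens only
print(loss)
```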
diff --git a/spaces/LightChen2333/OpenSLU/model/decoder/interaction/gl_gin_interaction.py b/spaces/LightChen2333/OpenSLU/model/decoder/interaction/gl_gin_interaction.py
deleted file mode 100644
index d6e697866719d3e71fca9a1ec91cd95dce81b9c1..0000000000000000000000000000000000000000
--- a/spaces/LightChen2333/OpenSLU/model/decoder/interaction/gl_gin_interaction.py
+++ /dev/null
@@ -1,227 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
-
-from common.utils import HiddenData, ClassifierOutputData
-from model.decoder.interaction import BaseInteraction
-
-
-class LSTMEncoder(nn.Module):
- """
- Encoder structure based on bidirectional LSTM.
- """
-
- def __init__(self, embedding_dim, hidden_dim, dropout_rate):
- super(LSTMEncoder, self).__init__()
-
- # Parameter recording.
- self.__embedding_dim = embedding_dim
- self.__hidden_dim = hidden_dim // 2
- self.__dropout_rate = dropout_rate
-
- # Network attributes.
- self.__dropout_layer = nn.Dropout(self.__dropout_rate)
- self.__lstm_layer = nn.LSTM(
- input_size=self.__embedding_dim,
- hidden_size=self.__hidden_dim,
- batch_first=True,
- bidirectional=True,
- dropout=self.__dropout_rate,
- num_layers=1
- )
-
- def forward(self, embedded_text, seq_lens):
- """ Forward process for LSTM Encoder.
-
- (batch_size, max_sent_len)
- -> (batch_size, max_sent_len, word_dim)
- -> (batch_size, max_sent_len, hidden_dim)
-
- :param embedded_text: padded and embedded input text.
-        :param seq_lens: the lengths of the original input texts.
-        :return: the encoded word hidden vectors.
- """
-
- # Padded_text should be instance of LongTensor.
- dropout_text = self.__dropout_layer(embedded_text)
-
- # Pack and Pad process for input of variable length.
- packed_text = pack_padded_sequence(dropout_text, seq_lens.cpu(), batch_first=True, enforce_sorted=False)
- lstm_hiddens, (h_last, c_last) = self.__lstm_layer(packed_text)
- padded_hiddens, _ = pad_packed_sequence(lstm_hiddens, batch_first=True)
-
- return padded_hiddens
-
-
-class GraphAttentionLayer(nn.Module):
- """
- Simple GAT layer, similar to https://arxiv.org/abs/1710.10903
- """
-
- def __init__(self, in_features, out_features, dropout, alpha, concat=True):
- super(GraphAttentionLayer, self).__init__()
- self.dropout = dropout
- self.in_features = in_features
- self.out_features = out_features
- self.alpha = alpha
- self.concat = concat
-
- self.W = nn.Parameter(torch.zeros(size=(in_features, out_features)))
- nn.init.xavier_uniform_(self.W.data, gain=1.414)
- self.a = nn.Parameter(torch.zeros(size=(2 * out_features, 1)))
- nn.init.xavier_uniform_(self.a.data, gain=1.414)
-
- self.leakyrelu = nn.LeakyReLU(self.alpha)
-
- def forward(self, input, adj):
- h = torch.matmul(input, self.W)
- B, N = h.size()[0], h.size()[1]
-
- a_input = torch.cat([h.repeat(1, 1, N).view(B, N * N, -1), h.repeat(1, N, 1)], dim=2).view(B, N, -1,
- 2 * self.out_features)
- e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(3))
-
- zero_vec = -9e15 * torch.ones_like(e)
- attention = torch.where(adj > 0, e, zero_vec)
- attention = F.softmax(attention, dim=2)
- attention = F.dropout(attention, self.dropout, training=self.training)
- h_prime = torch.matmul(attention, h)
-
- if self.concat:
- return F.elu(h_prime)
- else:
- return h_prime
-
-
-class GAT(nn.Module):
- def __init__(self, nfeat, nhid, nclass, dropout, alpha, nheads, nlayers=2):
- """Dense version of GAT."""
- super(GAT, self).__init__()
- self.dropout = dropout
- self.nlayers = nlayers
- self.nheads = nheads
- self.attentions = [GraphAttentionLayer(nfeat, nhid, dropout=dropout, alpha=alpha, concat=True) for _ in
- range(nheads)]
- for i, attention in enumerate(self.attentions):
- self.add_module('attention_{}'.format(i), attention)
- if self.nlayers > 2:
- for i in range(self.nlayers - 2):
- for j in range(self.nheads):
- self.add_module('attention_{}_{}'.format(i + 1, j),
- GraphAttentionLayer(nhid * nheads, nhid, dropout=dropout, alpha=alpha, concat=True))
-
- self.out_att = GraphAttentionLayer(nhid * nheads, nclass, dropout=dropout, alpha=alpha, concat=False)
-
- def forward(self, x, adj):
- x = F.dropout(x, self.dropout, training=self.training)
- input = x
- x = torch.cat([att(x, adj) for att in self.attentions], dim=2)
- if self.nlayers > 2:
- for i in range(self.nlayers - 2):
- temp = []
- x = F.dropout(x, self.dropout, training=self.training)
- cur_input = x
- for j in range(self.nheads):
- temp.append(self.__getattr__('attention_{}_{}'.format(i + 1, j))(x, adj))
- x = torch.cat(temp, dim=2) + cur_input
- x = F.dropout(x, self.dropout, training=self.training)
- x = F.elu(self.out_att(x, adj))
- return x + input
-
-
-def normalize_adj(mx):
- """
- Row-normalize matrix D^{-1}A
- torch.diag_embed: https://github.com/pytorch/pytorch/pull/12447
- """
- mx = mx.float()
- rowsum = mx.sum(2)
- r_inv = torch.pow(rowsum, -1)
- r_inv[torch.isinf(r_inv)] = 0.
- r_mat_inv = torch.diag_embed(r_inv, 0)
- mx = r_mat_inv.matmul(mx)
- return mx
-
-
-class GLGINInteraction(BaseInteraction):
- def __init__(self, **config):
- super().__init__(**config)
- self.intent_embedding = nn.Parameter(
- torch.FloatTensor(self.config["intent_label_num"], self.config["intent_embedding_dim"])) # 191, 32
- nn.init.normal_(self.intent_embedding.data)
- self.adj = None
- self.__slot_lstm = LSTMEncoder(
- self.config["input_dim"] + self.config["intent_label_num"],
- config["output_dim"],
- config["dropout_rate"]
- )
- self.__slot_graph = GAT(
- config["output_dim"],
- config["hidden_dim"],
- config["output_dim"],
- config["dropout_rate"],
- config["alpha"],
- config["num_heads"],
- config["num_layers"])
-
- self.__global_graph = GAT(
- config["output_dim"],
- config["hidden_dim"],
- config["output_dim"],
- config["dropout_rate"],
- config["alpha"],
- config["num_heads"],
- config["num_layers"])
-
- def generate_global_adj_gat(self, seq_len, index, batch, window):
- global_intent_idx = [[] for i in range(batch)]
- global_slot_idx = [[] for i in range(batch)]
- for item in index:
- global_intent_idx[item[0]].append(item[1])
-
- for i, len in enumerate(seq_len):
- global_slot_idx[i].extend(
- list(range(self.config["intent_label_num"], self.config["intent_label_num"] + len)))
-
- adj = torch.cat([torch.eye(self.config["intent_label_num"] + max(seq_len)).unsqueeze(0) for i in range(batch)])
- for i in range(batch):
- for j in global_intent_idx[i]:
- adj[i, j, global_slot_idx[i]] = 1.
- adj[i, j, global_intent_idx[i]] = 1.
- for j in global_slot_idx[i]:
- adj[i, j, global_intent_idx[i]] = 1.
-
- for i in range(batch):
- for j in range(self.config["intent_label_num"], self.config["intent_label_num"] + seq_len[i]):
- adj[i, j, max(self.config["intent_label_num"], j - window):min(seq_len[i] + self.config["intent_label_num"], j + window + 1)] = 1.
-
- if self.config["row_normalized"]:
- adj = normalize_adj(adj)
- adj = adj.to(self.intent_embedding.device)
- return adj
-
- def generate_slot_adj_gat(self, seq_len, batch, window):
- slot_idx_ = [[] for i in range(batch)]
- adj = torch.cat([torch.eye(max(seq_len)).unsqueeze(0) for i in range(batch)])
- for i in range(batch):
- for j in range(seq_len[i]):
- adj[i, j, max(0, j - window):min(seq_len[i], j + window + 1)] = 1.
- if self.config["row_normalized"]:
- adj = normalize_adj(adj)
- adj = adj.to(self.intent_embedding.device)
- return adj
-
- def forward(self, encode_hidden: HiddenData, pred_intent: ClassifierOutputData = None, intent_index=None):
- seq_lens = encode_hidden.inputs.attention_mask.sum(-1)
- slot_lstm_out = self.__slot_lstm(torch.cat([encode_hidden.slot_hidden, pred_intent.classifier_output], dim=-1),
- seq_lens)
- global_adj = self.generate_global_adj_gat(seq_lens, intent_index, len(seq_lens),
- self.config["slot_graph_window"])
- slot_adj = self.generate_slot_adj_gat(seq_lens, len(seq_lens), self.config["slot_graph_window"])
- batch = len(seq_lens)
- slot_graph_out = self.__slot_graph(slot_lstm_out, slot_adj)
- intent_in = self.intent_embedding.unsqueeze(0).repeat(batch, 1, 1)
- global_graph_in = torch.cat([intent_in, slot_graph_out], dim=1)
- encode_hidden.update_slot_hidden_state(self.__global_graph(global_graph_in, global_adj))
- return encode_hidden
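A quick standalone check of `normalize_adj` above: each row of the batched adjacency matrix is divided by its degree, giving D^{-1}A, while isolated rows stay at zero.

```python
import torch

adj = torch.tensor([[[1., 1., 0.],
                     [0., 1., 1.],
                     [0., 0., 0.]]])
rowsum = adj.sum(2)
r_inv = torch.pow(rowsum, -1)
r_inv[torch.isinf(r_inv)] = 0.       # guard rows with zero degree
norm = torch.diag_embed(r_inv).matmul(adj)
print(norm.sum(2))                   # tensor([[1., 1., 0.]])
```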
diff --git "a/spaces/Liu-LAB/GPT-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py" "b/spaces/Liu-LAB/GPT-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py"
deleted file mode 100644
index 554c485aa0891f74c57cacfcbe076febe7a11029..0000000000000000000000000000000000000000
--- "a/spaces/Liu-LAB/GPT-academic/crazy_functions/Latex\345\205\250\346\226\207\347\277\273\350\257\221.py"
+++ /dev/null
@@ -1,175 +0,0 @@
-from toolbox import update_ui
-from toolbox import CatchException, report_execption, write_results_to_file
-fast_debug = False
-
-class PaperFileGroup():
- def __init__(self):
- self.file_paths = []
- self.file_contents = []
- self.sp_file_contents = []
- self.sp_file_index = []
- self.sp_file_tag = []
-
- # count_token
- from request_llm.bridge_all import model_info
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
- def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
- self.get_token_num = get_token_num
-
- def run_file_split(self, max_token_limit=1900):
- """
-        Split overly long text into separate segments.
- """
- for index, file_content in enumerate(self.file_contents):
- if self.get_token_num(file_content) < max_token_limit:
- self.sp_file_contents.append(file_content)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index])
- else:
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
- segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
- for j, segment in enumerate(segments):
- self.sp_file_contents.append(segment)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.tex")
-
- print('Segmentation: done')
-
-def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
- import time, os, re
- from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-
-    # <-------- Read the LaTeX files and strip all comments ---------->
- pfg = PaperFileGroup()
-
- for index, fp in enumerate(file_manifest):
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
-            # Regular expression matching LaTeX comments (an unescaped % up to the end of the line)
-            comment_pattern = r'(?<!\\)%.*'
-            # Strip the comments and record the cleaned text
-            clean_tex_content = re.sub(comment_pattern, '', file_content)
-            pfg.file_paths.append(fp)
-            pfg.file_contents.append(clean_tex_content)
-
-    # <-------- Split LaTeX files that are too long ---------->
- pfg.run_file_split(max_token_limit=1024)
- n_split = len(pfg.sp_file_contents)
-
-    # <-------- Extract the abstract ---------->
- # if language == 'en':
- # abs_extract_inputs = f"Please write an abstract for this paper"
-
-    # # Single thread: fetch the paper's meta information
- # paper_meta_info = yield from request_gpt_model_in_new_thread_with_ui_alive(
- # inputs=abs_extract_inputs,
- # inputs_show_user=f"正在抽取摘要信息。",
- # llm_kwargs=llm_kwargs,
- # chatbot=chatbot, history=[],
- # sys_prompt="Your job is to collect information from materials。",
- # )
-
-    # <-------- Start the multi-threaded translation ---------->
- if language == 'en->zh':
- inputs_array = ["Below is a section from an English academic paper, translate it into Chinese, do not modify any latex command such as \section, \cite and equations:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
- elif language == 'zh->en':
- inputs_array = [f"Below is a section from a Chinese academic paper, translate it into English, do not modify any latex command such as \section, \cite and equations:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
-
- gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
- inputs_array=inputs_array,
- inputs_show_user_array=inputs_show_user_array,
- llm_kwargs=llm_kwargs,
- chatbot=chatbot,
- history_array=[[""] for _ in range(n_split)],
- sys_prompt_array=sys_prompt_array,
- # max_workers=5, # OpenAI所允许的最大并行过载
- scroller_max_len = 80
- )
-
- # <-------- 整理结果,退出 ---------->
- create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
- res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
- history = gpt_response_collection
- chatbot.append((f"{fp}完成了吗?", res))
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-
-
-
-
-@CatchException
-def Latex英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- # 基本信息:功能、贡献者
- chatbot.append([
- "函数插件功能?",
- "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
- # 尝试导入依赖,如果缺少依赖,则给出安装建议
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- history = [] # 清空历史,以免输入溢出
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
-
-
-
-
-
-@CatchException
-def Latex中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- # 基本信息:功能、贡献者
- chatbot.append([
- "函数插件功能?",
- "对整个Latex项目进行翻译。函数插件贡献者: Binary-Husky"])
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
- # 尝试导入依赖,如果缺少依赖,则给出安装建议
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- history = [] # 清空历史,以免输入溢出
- import glob, os
- if os.path.exists(txt):
- project_folder = txt
- else:
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)]
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
\ No newline at end of file
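
Both translation entry points above hand the chunking work to `breakdown_txt_to_satisfy_token_limit_for_pdf` from `crazy_utils`, which is not part of this diff. The snippet below is only a minimal sketch of that idea, recursively halving a text until every fragment fits the token budget, using `tiktoken` directly; it is an illustration under that assumption, not the project's actual helper.

```python
# Minimal sketch of token-limited splitting; NOT the crazy_utils implementation.
# Assumes `tiktoken` is installed (the plugin above also requires it).
import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def get_token_num(txt: str) -> int:
    return len(enc.encode(txt, disallowed_special=()))

def breakdown_txt(txt: str, limit: int) -> list:
    """Recursively split `txt` (preferring newline boundaries) until every
    fragment stays under `limit` tokens."""
    if get_token_num(txt) < limit:
        return [txt]
    mid = len(txt) // 2
    cut = txt.rfind("\n", 0, mid)      # prefer a newline near the middle
    if cut <= 0:
        cut = mid                      # fall back to a hard split
    return breakdown_txt(txt[:cut], limit) + breakdown_txt(txt[cut:], limit)

segments = breakdown_txt("line one\n" * 2000, limit=1024)
print(len(segments), max(get_token_num(s) for s in segments))
```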
diff --git "a/spaces/Liu-LAB/GPT-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py" "b/spaces/Liu-LAB/GPT-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py"
deleted file mode 100644
index 8af98640bf51a4edf19254c05d68fad282d02776..0000000000000000000000000000000000000000
--- "a/spaces/Liu-LAB/GPT-academic/crazy_functions/\346\211\271\351\207\217Markdown\347\277\273\350\257\221.py"
+++ /dev/null
@@ -1,261 +0,0 @@
-import glob, time, os, re
-from toolbox import update_ui, trimmed_format_exc, gen_time_str, disable_auto_promotion
-from toolbox import CatchException, report_execption, write_history_to_file
-from toolbox import promote_file_to_downloadzone, get_log_folder
-fast_debug = False
-
-class PaperFileGroup():
- def __init__(self):
- self.file_paths = []
- self.file_contents = []
- self.sp_file_contents = []
- self.sp_file_index = []
- self.sp_file_tag = []
-
- # count_token
- from request_llm.bridge_all import model_info
- enc = model_info["gpt-3.5-turbo"]['tokenizer']
- def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
- self.get_token_num = get_token_num
-
- def run_file_split(self, max_token_limit=1900):
- """
- 将长文本分离开来
- """
- for index, file_content in enumerate(self.file_contents):
- if self.get_token_num(file_content) < max_token_limit:
- self.sp_file_contents.append(file_content)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index])
- else:
- from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
- segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
- for j, segment in enumerate(segments):
- self.sp_file_contents.append(segment)
- self.sp_file_index.append(index)
- self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")
- print('Segmentation: done')
-
- def merge_result(self):
- self.file_result = ["" for _ in range(len(self.file_paths))]
- for r, k in zip(self.sp_file_result, self.sp_file_index):
- self.file_result[k] += r
-
- def write_result(self, language):
- manifest = []
- for path, res in zip(self.file_paths, self.file_result):
- dst_file = os.path.join(get_log_folder(), f'{gen_time_str()}.md')
- with open(dst_file, 'w', encoding='utf8') as f:
- manifest.append(dst_file)
- f.write(res)
- return manifest
-
-def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
- from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-
- # <-------- 读取Markdown文件,删除其中的所有注释 ---------->
- pfg = PaperFileGroup()
-
- for index, fp in enumerate(file_manifest):
- with open(fp, 'r', encoding='utf-8', errors='replace') as f:
- file_content = f.read()
- # 记录删除注释后的文本
- pfg.file_paths.append(fp)
- pfg.file_contents.append(file_content)
-
- # <-------- 拆分过长的Markdown文件 ---------->
- pfg.run_file_split(max_token_limit=1500)
- n_split = len(pfg.sp_file_contents)
-
- # <-------- 多线程翻译开始 ---------->
- if language == 'en->zh':
- inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
- elif language == 'zh->en':
- inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
- else:
- inputs_array = [f"This is a Markdown file, translate it into {language}, do not modify any existing Markdown commands, only answer me with translated results:" +
- f"\n\n{frag}" for frag in pfg.sp_file_contents]
- inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
- sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
-
- gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
- inputs_array=inputs_array,
- inputs_show_user_array=inputs_show_user_array,
- llm_kwargs=llm_kwargs,
- chatbot=chatbot,
- history_array=[[""] for _ in range(n_split)],
- sys_prompt_array=sys_prompt_array,
- # max_workers=5, # OpenAI所允许的最大并行过载
- scroller_max_len = 80
- )
- try:
- pfg.sp_file_result = []
- for i_say, gpt_say in zip(gpt_response_collection[0::2], gpt_response_collection[1::2]):
- pfg.sp_file_result.append(gpt_say)
- pfg.merge_result()
- pfg.write_result(language)
- except:
- print(trimmed_format_exc())
-
- # <-------- 整理结果,退出 ---------->
- create_report_file_name = gen_time_str() + f"-chatgpt.md"
- res = write_history_to_file(gpt_response_collection, file_basename=create_report_file_name)
- promote_file_to_downloadzone(res, chatbot=chatbot)
- history = gpt_response_collection
- chatbot.append((f"{fp}完成了吗?", res))
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
-
-
-def get_files_from_everything(txt, preference=''):
- if txt == "": return False, None, None
- success = True
- if txt.startswith('http'):
- import requests
- from toolbox import get_conf
- proxies, = get_conf('proxies')
- # 网络的远程文件
- if preference == 'Github':
- print('正在从github下载资源 ...')
- if not txt.endswith('.md'):
- # Make a request to the GitHub API to retrieve the repository information
- url = txt.replace("https://github.com/", "https://api.github.com/repos/") + '/readme'
- response = requests.get(url, proxies=proxies)
- txt = response.json()['download_url']
- else:
- txt = txt.replace("https://github.com/", "https://raw.githubusercontent.com/")
- txt = txt.replace("/blob/", "/")
-
- r = requests.get(txt, proxies=proxies)
- download_local = f'{get_log_folder(plugin_name="批量Markdown翻译")}/raw-readme-{gen_time_str()}.md'
- project_folder = f'{get_log_folder(plugin_name="批量Markdown翻译")}'
- with open(download_local, 'wb+') as f: f.write(r.content)
- file_manifest = [download_local]
- elif txt.endswith('.md'):
- # 直接给定文件
- file_manifest = [txt]
- project_folder = os.path.dirname(txt)
- elif os.path.exists(txt):
- # 本地路径,递归搜索
- project_folder = txt
- file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
- else:
- project_folder = None
- file_manifest = []
- success = False
-
- return success, file_manifest, project_folder
-
-
-@CatchException
-def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- # 基本信息:功能、贡献者
- chatbot.append([
- "函数插件功能?",
- "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- disable_auto_promotion(chatbot)
-
- # 尝试导入依赖,如果缺少依赖,则给出安装建议
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- history = [] # 清空历史,以免输入溢出
-
- success, file_manifest, project_folder = get_files_from_everything(txt, preference="Github")
-
- if not success:
- # 什么都没有
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
-
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
-
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
-
-
-
-
-
-@CatchException
-def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- # 基本信息:功能、贡献者
- chatbot.append([
- "函数插件功能?",
- "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- disable_auto_promotion(chatbot)
-
- # 尝试导入依赖,如果缺少依赖,则给出安装建议
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- history = [] # 清空历史,以免输入溢出
- success, file_manifest, project_folder = get_files_from_everything(txt)
- if not success:
- # 什么都没有
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
-
-
-@CatchException
-def Markdown翻译指定语言(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
- # 基本信息:功能、贡献者
- chatbot.append([
- "函数插件功能?",
- "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- disable_auto_promotion(chatbot)
-
- # 尝试导入依赖,如果缺少依赖,则给出安装建议
- try:
- import tiktoken
- except:
- report_execption(chatbot, history,
- a=f"解析项目: {txt}",
- b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- history = [] # 清空历史,以免输入溢出
- success, file_manifest, project_folder = get_files_from_everything(txt)
- if not success:
- # 什么都没有
- if txt == "": txt = '空空如也的输入栏'
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
- if len(file_manifest) == 0:
- report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.md文件: {txt}")
- yield from update_ui(chatbot=chatbot, history=history) # 刷新界面
- return
-
- if ("advanced_arg" in plugin_kwargs) and (plugin_kwargs["advanced_arg"] == ""): plugin_kwargs.pop("advanced_arg")
- language = plugin_kwargs.get("advanced_arg", 'Chinese')
- yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language=language)
\ No newline at end of file
diff --git a/spaces/LiuZiyi/1-video-video2txt-whisper-yt/app.py b/spaces/LiuZiyi/1-video-video2txt-whisper-yt/app.py
deleted file mode 100644
index 914efc58e281850f11d294a8fd88f28057e6e144..0000000000000000000000000000000000000000
--- a/spaces/LiuZiyi/1-video-video2txt-whisper-yt/app.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import streamlit as st
-import whisper
-import os
-from pytube import YouTube
-
-st.title("YouTube URL Video-to-Text")
-
-model = whisper.load_model("base")
-
-youtube_URL = st.text_input(label="Enter your YouTube URL")
-
-if youtube_URL:
- # Get the video
- video = YouTube(youtube_URL)
-
- # Convert video to audio
- audio = video.streams.filter(only_audio=True).first()
- output = audio.download(output_path = ".")
- base, ext = os.path.splitext(output)
- new_file = base + '.mp3'
-
- # Change the name of the file
- os.rename(output, new_file)
-
- a = new_file
-
- # st.write(a)
- result = model.transcribe(a)
- st.write(result["text"])
-else:
- st.error("☢️ Enter youtube URL")
diff --git a/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/app.py b/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/app.py
deleted file mode 100644
index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000
--- a/spaces/LongBeattz/runwayml-stable-diffusion-v1-5/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch()
\ No newline at end of file
diff --git a/spaces/MAEBA96/SUMMARISER96/app.py b/spaces/MAEBA96/SUMMARISER96/app.py
deleted file mode 100644
index c9e32a058fd184930cdf74bb3cf2f155a79b39cc..0000000000000000000000000000000000000000
--- a/spaces/MAEBA96/SUMMARISER96/app.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import gradio as gr
-from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
-
-def summarize_text(input_text, max_summary_length):
- summarizer = pipeline(
- task="summarization",
- model="t5-base",
- min_length=20,
- max_length=1024,
- truncation=True,
- model_kwargs={"cache_dir": '/Documents/Huggin_Face/'}
- )
-
- # Generate the summary
- output = summarizer(input_text, max_length=max_summary_length, min_length=30, do_sample=False)
- summary = output[0]['summary_text']
-
- return summary
-
-# Interface
-iface = gr.Interface(
- fn=summarize_text,
- inputs=[
- gr.inputs.Textbox(lines=10, label="Upload or paste text", placeholder="Paste your text here..."),
- gr.inputs.Slider(minimum=50, maximum=500, default=150, label="Max Summary Length"),
- ],
- outputs=gr.outputs.Textbox(label="NeuraShift AI Text Summarizer"),
- layout="vertical",
- theme="advanced",
- live=True,
- examples=None # Remove the examples
-)
-
-iface.launch()
diff --git a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/pix2pix_model.py b/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/pix2pix_model.py
deleted file mode 100644
index 41d6df671752f11ab7001d5b1b3e82034c2e6493..0000000000000000000000000000000000000000
--- a/spaces/MCkernick/Image_Restoration_Colorization/Face_Enhancement/models/pix2pix_model.py
+++ /dev/null
@@ -1,246 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# Licensed under the MIT License.
-
-import torch
-import models.networks as networks
-import util.util as util
-
-
-class Pix2PixModel(torch.nn.Module):
- @staticmethod
- def modify_commandline_options(parser, is_train):
- networks.modify_commandline_options(parser, is_train)
- return parser
-
- def __init__(self, opt):
- super().__init__()
- self.opt = opt
- self.FloatTensor = torch.cuda.FloatTensor if self.use_gpu() else torch.FloatTensor
- self.ByteTensor = torch.cuda.ByteTensor if self.use_gpu() else torch.ByteTensor
-
- self.netG, self.netD, self.netE = self.initialize_networks(opt)
-
- # set loss functions
- if opt.isTrain:
- self.criterionGAN = networks.GANLoss(opt.gan_mode, tensor=self.FloatTensor, opt=self.opt)
- self.criterionFeat = torch.nn.L1Loss()
- if not opt.no_vgg_loss:
- self.criterionVGG = networks.VGGLoss(self.opt.gpu_ids)
- if opt.use_vae:
- self.KLDLoss = networks.KLDLoss()
-
- # Entry point for all calls involving forward pass
- # of deep networks. We used this approach since DataParallel module
- # can't parallelize custom functions, we branch to different
- # routines based on |mode|.
- def forward(self, data, mode):
- input_semantics, real_image, degraded_image = self.preprocess_input(data)
-
- if mode == "generator":
- g_loss, generated = self.compute_generator_loss(input_semantics, degraded_image, real_image)
- return g_loss, generated
- elif mode == "discriminator":
- d_loss = self.compute_discriminator_loss(input_semantics, degraded_image, real_image)
- return d_loss
- elif mode == "encode_only":
- z, mu, logvar = self.encode_z(real_image)
- return mu, logvar
- elif mode == "inference":
- with torch.no_grad():
- fake_image, _ = self.generate_fake(input_semantics, degraded_image, real_image)
- return fake_image
- else:
- raise ValueError("|mode| is invalid")
-
- def create_optimizers(self, opt):
- G_params = list(self.netG.parameters())
- if opt.use_vae:
- G_params += list(self.netE.parameters())
- if opt.isTrain:
- D_params = list(self.netD.parameters())
-
- beta1, beta2 = opt.beta1, opt.beta2
- if opt.no_TTUR:
- G_lr, D_lr = opt.lr, opt.lr
- else:
- G_lr, D_lr = opt.lr / 2, opt.lr * 2
-
- optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2))
- optimizer_D = torch.optim.Adam(D_params, lr=D_lr, betas=(beta1, beta2))
-
- return optimizer_G, optimizer_D
-
- def save(self, epoch):
- util.save_network(self.netG, "G", epoch, self.opt)
- util.save_network(self.netD, "D", epoch, self.opt)
- if self.opt.use_vae:
- util.save_network(self.netE, "E", epoch, self.opt)
-
- ############################################################################
- # Private helper methods
- ############################################################################
-
- def initialize_networks(self, opt):
- netG = networks.define_G(opt)
- netD = networks.define_D(opt) if opt.isTrain else None
- netE = networks.define_E(opt) if opt.use_vae else None
-
- if not opt.isTrain or opt.continue_train:
- netG = util.load_network(netG, "G", opt.which_epoch, opt)
- if opt.isTrain:
- netD = util.load_network(netD, "D", opt.which_epoch, opt)
- if opt.use_vae:
- netE = util.load_network(netE, "E", opt.which_epoch, opt)
-
- return netG, netD, netE
-
- # preprocess the input, such as moving the tensors to GPUs and
- # transforming the label map to one-hot encoding
- # |data|: dictionary of the input data
-
- def preprocess_input(self, data):
- # move to GPU and change data types
- # data['label'] = data['label'].long()
-
- if not self.opt.isTrain:
- if self.use_gpu():
- data["label"] = data["label"].cuda()
- data["image"] = data["image"].cuda()
- return data["label"], data["image"], data["image"]
-
- ## While testing, the input image is the degraded face
- if self.use_gpu():
- data["label"] = data["label"].cuda()
- data["degraded_image"] = data["degraded_image"].cuda()
- data["image"] = data["image"].cuda()
-
- # # create one-hot label map
- # label_map = data['label']
- # bs, _, h, w = label_map.size()
- # nc = self.opt.label_nc + 1 if self.opt.contain_dontcare_label \
- # else self.opt.label_nc
- # input_label = self.FloatTensor(bs, nc, h, w).zero_()
- # input_semantics = input_label.scatter_(1, label_map, 1.0)
-
- return data["label"], data["image"], data["degraded_image"]
-
- def compute_generator_loss(self, input_semantics, degraded_image, real_image):
- G_losses = {}
-
- fake_image, KLD_loss = self.generate_fake(
- input_semantics, degraded_image, real_image, compute_kld_loss=self.opt.use_vae
- )
-
- if self.opt.use_vae:
- G_losses["KLD"] = KLD_loss
-
- pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image)
-
- G_losses["GAN"] = self.criterionGAN(pred_fake, True, for_discriminator=False)
-
- if not self.opt.no_ganFeat_loss:
- num_D = len(pred_fake)
- GAN_Feat_loss = self.FloatTensor(1).fill_(0)
- for i in range(num_D): # for each discriminator
- # last output is the final prediction, so we exclude it
- num_intermediate_outputs = len(pred_fake[i]) - 1
- for j in range(num_intermediate_outputs): # for each layer output
- unweighted_loss = self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach())
- GAN_Feat_loss += unweighted_loss * self.opt.lambda_feat / num_D
- G_losses["GAN_Feat"] = GAN_Feat_loss
-
- if not self.opt.no_vgg_loss:
- G_losses["VGG"] = self.criterionVGG(fake_image, real_image) * self.opt.lambda_vgg
-
- return G_losses, fake_image
-
- def compute_discriminator_loss(self, input_semantics, degraded_image, real_image):
- D_losses = {}
- with torch.no_grad():
- fake_image, _ = self.generate_fake(input_semantics, degraded_image, real_image)
- fake_image = fake_image.detach()
- fake_image.requires_grad_()
-
- pred_fake, pred_real = self.discriminate(input_semantics, fake_image, real_image)
-
- D_losses["D_Fake"] = self.criterionGAN(pred_fake, False, for_discriminator=True)
- D_losses["D_real"] = self.criterionGAN(pred_real, True, for_discriminator=True)
-
- return D_losses
-
- def encode_z(self, real_image):
- mu, logvar = self.netE(real_image)
- z = self.reparameterize(mu, logvar)
- return z, mu, logvar
-
- def generate_fake(self, input_semantics, degraded_image, real_image, compute_kld_loss=False):
- z = None
- KLD_loss = None
- if self.opt.use_vae:
- z, mu, logvar = self.encode_z(real_image)
- if compute_kld_loss:
- KLD_loss = self.KLDLoss(mu, logvar) * self.opt.lambda_kld
-
- fake_image = self.netG(input_semantics, degraded_image, z=z)
-
- assert (
- not compute_kld_loss
- ) or self.opt.use_vae, "You cannot compute KLD loss if opt.use_vae == False"
-
- return fake_image, KLD_loss
-
- # Given fake and real image, return the prediction of discriminator
- # for each fake and real image.
-
- def discriminate(self, input_semantics, fake_image, real_image):
-
- if self.opt.no_parsing_map:
- fake_concat = fake_image
- real_concat = real_image
- else:
- fake_concat = torch.cat([input_semantics, fake_image], dim=1)
- real_concat = torch.cat([input_semantics, real_image], dim=1)
-
- # In Batch Normalization, the fake and real images are
- # recommended to be in the same batch to avoid disparate
- # statistics in fake and real images.
- # So both fake and real images are fed to D all at once.
- fake_and_real = torch.cat([fake_concat, real_concat], dim=0)
-
- discriminator_out = self.netD(fake_and_real)
-
- pred_fake, pred_real = self.divide_pred(discriminator_out)
-
- return pred_fake, pred_real
-
- # Take the prediction of fake and real images from the combined batch
- def divide_pred(self, pred):
- # the prediction contains the intermediate outputs of multiscale GAN,
- # so it's usually a list
- if type(pred) == list:
- fake = []
- real = []
- for p in pred:
- fake.append([tensor[: tensor.size(0) // 2] for tensor in p])
- real.append([tensor[tensor.size(0) // 2 :] for tensor in p])
- else:
- fake = pred[: pred.size(0) // 2]
- real = pred[pred.size(0) // 2 :]
-
- return fake, real
-
- def get_edges(self, t):
- edge = self.ByteTensor(t.size()).zero_()
- edge[:, :, :, 1:] = edge[:, :, :, 1:] | (t[:, :, :, 1:] != t[:, :, :, :-1])
- edge[:, :, :, :-1] = edge[:, :, :, :-1] | (t[:, :, :, 1:] != t[:, :, :, :-1])
- edge[:, :, 1:, :] = edge[:, :, 1:, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
- edge[:, :, :-1, :] = edge[:, :, :-1, :] | (t[:, :, 1:, :] != t[:, :, :-1, :])
- return edge.float()
-
- def reparameterize(self, mu, logvar):
- std = torch.exp(0.5 * logvar)
- eps = torch.randn_like(std)
- return eps.mul(std) + mu
-
- def use_gpu(self):
- return len(self.opt.gpu_ids) > 0
diff --git a/spaces/Mahiruoshi/MyGO_VIts-bert/bert/bert-base-japanese-v3/README.md b/spaces/Mahiruoshi/MyGO_VIts-bert/bert/bert-base-japanese-v3/README.md
deleted file mode 100644
index 630f3eca8d375fe0242a0b5c6d2686e2241a780f..0000000000000000000000000000000000000000
--- a/spaces/Mahiruoshi/MyGO_VIts-bert/bert/bert-base-japanese-v3/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-license: apache-2.0
-datasets:
-- cc100
-- wikipedia
-language:
-- ja
-widget:
-- text: 東北大学で[MASK]の研究をしています。
----
-
-# BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)
-
-This is a [BERT](https://github.com/google-research/bert) model pretrained on texts in the Japanese language.
-
-This version of the model processes input texts with word-level tokenization based on the Unidic 2.1.2 dictionary (available in [unidic-lite](https://pypi.org/project/unidic-lite/) package), followed by the WordPiece subword tokenization.
-Additionally, the model is trained with the whole word masking enabled for the masked language modeling (MLM) objective.
-
-The codes for the pretraining are available at [cl-tohoku/bert-japanese](https://github.com/cl-tohoku/bert-japanese/).
-
-## Model architecture
-
-The model architecture is the same as the original BERT base model; 12 layers, 768 dimensions of hidden states, and 12 attention heads.
-
-## Training Data
-
-The model is trained on the Japanese portion of [CC-100 dataset](https://data.statmt.org/cc-100/) and the Japanese version of Wikipedia.
-For Wikipedia, we generated a text corpus from the [Wikipedia Cirrussearch dump file](https://dumps.wikimedia.org/other/cirrussearch/) as of January 2, 2023.
-The corpus files generated from CC-100 and Wikipedia are 74.3GB and 4.9GB in size and consist of approximately 392M and 34M sentences, respectively.
-
-For the purpose of splitting texts into sentences, we used [fugashi](https://github.com/polm/fugashi) with [mecab-ipadic-NEologd](https://github.com/neologd/mecab-ipadic-neologd) dictionary (v0.0.7).
-
-## Tokenization
-
-The texts are first tokenized by MeCab with the Unidic 2.1.2 dictionary and then split into subwords by the WordPiece algorithm.
-The vocabulary size is 32768.
-
-We used [fugashi](https://github.com/polm/fugashi) and [unidic-lite](https://github.com/polm/unidic-lite) packages for the tokenization.
-
-## Training
-
-We trained the model first on the CC-100 corpus for 1M steps and then on the Wikipedia corpus for another 1M steps.
-For training of the MLM (masked language modeling) objective, we introduced whole word masking in which all of the subword tokens corresponding to a single word (tokenized by MeCab) are masked at once.
-
-For training of each model, we used a v3-8 instance of Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/).
-
-## Licenses
-
-The pretrained models are distributed under the Apache License 2.0.
-
-## Acknowledgments
-
-This model is trained with Cloud TPUs provided by [TPU Research Cloud](https://sites.research.google/trc/about/) program.
\ No newline at end of file
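
For reference, using the model described by this card for masked-word prediction might look like the sketch below. It assumes the checkpoint is published on the Hugging Face Hub as `cl-tohoku/bert-base-japanese-v3` and that `fugashi` and `unidic-lite` are installed, since the tokenizer described above depends on them.

```python
# Hedged sketch: assumes the Hub id "cl-tohoku/bert-base-japanese-v3" and that
# `fugashi` + `unidic-lite` are installed for the MeCab word-level tokenization.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="cl-tohoku/bert-base-japanese-v3")

# Same example sentence as the model card's widget text.
for pred in fill_mask("東北大学で[MASK]の研究をしています。", top_k=3):
    print(pred["token_str"], round(pred["score"], 3))
```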
diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/utils/se_layer.py b/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/utils/se_layer.py
deleted file mode 100644
index 083bd7d1ccee909c900c7aed2cc928bf14727f3e..0000000000000000000000000000000000000000
--- a/spaces/Mellow-ai/PhotoAI_Mellow/annotator/uniformer/mmseg/models/utils/se_layer.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import annotator.uniformer.mmcv as mmcv
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from .make_divisible import make_divisible
-
-
-class SELayer(nn.Module):
- """Squeeze-and-Excitation Module.
-
- Args:
- channels (int): The input (and output) channels of the SE layer.
- ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
- ``int(channels/ratio)``. Default: 16.
- conv_cfg (None or dict): Config dict for convolution layer.
- Default: None, which means using conv2d.
- act_cfg (dict or Sequence[dict]): Config dict for activation layer.
- If act_cfg is a dict, two activation layers will be configured
- by this dict. If act_cfg is a sequence of dicts, the first
- activation layer will be configured by the first dict and the
- second activation layer will be configured by the second dict.
- Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
- divisor=6.0)).
- """
-
- def __init__(self,
- channels,
- ratio=16,
- conv_cfg=None,
- act_cfg=(dict(type='ReLU'),
- dict(type='HSigmoid', bias=3.0, divisor=6.0))):
- super(SELayer, self).__init__()
- if isinstance(act_cfg, dict):
- act_cfg = (act_cfg, act_cfg)
- assert len(act_cfg) == 2
- assert mmcv.is_tuple_of(act_cfg, dict)
- self.global_avgpool = nn.AdaptiveAvgPool2d(1)
- self.conv1 = ConvModule(
- in_channels=channels,
- out_channels=make_divisible(channels // ratio, 8),
- kernel_size=1,
- stride=1,
- conv_cfg=conv_cfg,
- act_cfg=act_cfg[0])
- self.conv2 = ConvModule(
- in_channels=make_divisible(channels // ratio, 8),
- out_channels=channels,
- kernel_size=1,
- stride=1,
- conv_cfg=conv_cfg,
- act_cfg=act_cfg[1])
-
- def forward(self, x):
- out = self.global_avgpool(x)
- out = self.conv1(out)
- out = self.conv2(out)
- return x * out
diff --git a/spaces/Menna2211/ImCaptioning/pages/image-captioning.py b/spaces/Menna2211/ImCaptioning/pages/image-captioning.py
deleted file mode 100644
index 55cd8d491982883341f7a55626f998e521908219..0000000000000000000000000000000000000000
--- a/spaces/Menna2211/ImCaptioning/pages/image-captioning.py
+++ /dev/null
@@ -1,116 +0,0 @@
-from transformers import BertTokenizer
-import torch
-import time
-import streamlit as st
-from PIL import Image
-import torchvision.transforms as transforms
-import requests
-from transformers import BlipProcessor, BlipForConditionalGeneration
-
-
-tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
-start_token = tokenizer.convert_tokens_to_ids(tokenizer._cls_token)
-end_token = tokenizer.convert_tokens_to_ids(tokenizer._sep_token)
-def create_caption_and_mask(start_token, max_length):
- caption_template = torch.zeros((1, max_length), dtype=torch.long)
- mask_template = torch.ones((1, max_length), dtype=torch.bool)
- caption_template[:, 0] = start_token
- mask_template[:, 0] = False
- return caption_template, mask_template
-
-caption, cap_mask = create_caption_and_mask(start_token, 128)
-
-# Model 1
-@st.cache_resource(show_spinner=False ,ttl=3600)
-def get_model1():
- processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
- model1 = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
- return processor, model1
-
-processor, model1 =get_model1()
-
-# Model 2
-@st.cache_resource(show_spinner=False ,ttl=3600)
-def get_model2():
- model2 = torch.hub.load('saahiluppal/catr', 'v3', pretrained=True) # you can choose between v1, v2 and v3
- return model2
-
-model2 =get_model2()
-
-st.title("Image Captioning App")
-# define the layout of your app
-uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
-model = st.selectbox("Select a Model", ["Select a Model","Hugging-Face", "Github"])
-submit_button = st.button("Compute")
-if model == "Select a Model" and not submit_button :
- st.stop()
-elif model == "Select a Model" and submit_button :
- st.warning('Warning.....!!,Plz..... Select a Model ', icon="⚠️")
-
-if model == "Hugging-Face":
- if uploaded_file is not None :
- # Load the uploaded image
- image = Image.open(uploaded_file).convert('RGB')
- st.image(image)
- # Use the pre-trained model to generate a caption for the uploaded image
- progress_text = "Operation in progress. Please wait."
- bar = st.progress(0, text=progress_text)
- for percent_complete in range(100):
- inputs = processor(image, return_tensors="pt")
- out = model1.generate(**inputs , max_new_tokens=100)
- time.sleep(0.1)
- bar.progress(percent_complete + 1, text=progress_text)
-
- # Display the uploaded image and its generated caption
- st.write("Generated Caption:")
- st.write(processor.decode(out[0], skip_special_tokens=True))
- time.sleep(5)
- st.success('Congratulations..!! task is done ', icon="✅")
- st.balloons()
- else:
- st.error('Error...!!,Plz..... Upload an image' , icon="🚨")
-
-elif model == "Github":
- if uploaded_file is not None :
- # Load the uploaded image
- im = Image.open(uploaded_file)
- st.image(im)
- # Preprocess the input image
- transform = transforms.Compose([
- transforms.Resize((224, 224)), # Resize the image to 224x224
- transforms.ToTensor(), # Convert the image to a tensor
- transforms.Normalize( # Normalize the image
- mean=[0.485, 0.456, 0.406],
- std=[0.229, 0.224, 0.225])])
- image = transform(im).unsqueeze(0) # Add a batch dimension
- #@torch.no_grad()
- def evaluate():
- for i in range(128-1):
- predictions = model2(image, caption, cap_mask)
- predictions = predictions[:, i, :]
- predicted_id = torch.argmax(predictions, axis=-1)
-
- if predicted_id[0] == 102:
- return caption
- caption[:, i+1] = predicted_id[0]
- cap_mask[:, i+1] = False
-
- return caption
-
- # Use the pre-trained model to generate a caption for the uploaded image
- progress_text = "Operation in progress. Please wait."
- bar = st.progress(0, text=progress_text)
- for percent_complete in range(100):
- output = evaluate()
- time.sleep(0.1)
- bar.progress(percent_complete + 1, text=progress_text)
-
- # Display the uploaded image and its generated caption
- st.write("Generated Caption:")
- result = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
- st.write(result.capitalize())
- time.sleep(5)
- st.success('Congratulations...!! task is done ', icon="✅")
- st.balloons()
- else:
- st.error('Error...!!,Plz..... Upload an image' , icon="🚨")
diff --git a/spaces/MestikonAgency/README/model.py b/spaces/MestikonAgency/README/model.py
deleted file mode 100644
index 25a4bad7cf57b7d464ee1209874cfe24deaae78f..0000000000000000000000000000000000000000
--- a/spaces/MestikonAgency/README/model.py
+++ /dev/null
@@ -1,483 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
-
-import math
-from dataclasses import dataclass
-from typing import Optional, Tuple
-
-import fairscale.nn.model_parallel.initialize as fs_init
-import torch
-import torch.nn.functional as F
-from fairscale.nn.model_parallel.layers import (
- ColumnParallelLinear,
- ParallelEmbedding,
- RowParallelLinear,
-)
-from torch import nn
-
-
-@dataclass
-class ModelArgs:
- dim: int = 4096
- n_layers: int = 32
- n_heads: int = 32
- n_kv_heads: Optional[int] = None
- vocab_size: int = -1 # defined later by tokenizer
- multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
- ffn_dim_multiplier: Optional[float] = None
- norm_eps: float = 1e-5
-
- max_batch_size: int = 32
- max_seq_len: int = 2048
-
-
-class RMSNorm(torch.nn.Module):
- def __init__(self, dim: int, eps: float = 1e-6):
- """
- Initialize the RMSNorm normalization layer.
-
- Args:
- dim (int): The dimension of the input tensor.
- eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6.
-
- Attributes:
- eps (float): A small value added to the denominator for numerical stability.
- weight (nn.Parameter): Learnable scaling parameter.
-
- """
- super().__init__()
- self.eps = eps
- self.weight = nn.Parameter(torch.ones(dim))
-
- def _norm(self, x):
- """
- Apply the RMSNorm normalization to the input tensor.
-
- Args:
- x (torch.Tensor): The input tensor.
-
- Returns:
- torch.Tensor: The normalized tensor.
-
- """
- return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
-
- def forward(self, x):
- """
- Forward pass through the RMSNorm layer.
-
- Args:
- x (torch.Tensor): The input tensor.
-
- Returns:
- torch.Tensor: The output tensor after applying RMSNorm.
-
- """
- output = self._norm(x.float()).type_as(x)
- return output * self.weight
-
-
-def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
- """
- Precompute the frequency tensor for complex exponentials (cis) with given dimensions.
-
- This function calculates a frequency tensor with complex exponentials using the given dimension 'dim'
- and the end index 'end'. The 'theta' parameter scales the frequencies.
- The returned tensor contains complex values in complex64 data type.
-
- Args:
- dim (int): Dimension of the frequency tensor.
- end (int): End index for precomputing frequencies.
- theta (float, optional): Scaling factor for frequency computation. Defaults to 10000.0.
-
- Returns:
- torch.Tensor: Precomputed frequency tensor with complex exponentials.
-
-
-
-
- """
- freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
- t = torch.arange(end, device=freqs.device) # type: ignore
- freqs = torch.outer(t, freqs).float() # type: ignore
- freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64
- return freqs_cis
-
-
-def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
- """
- Reshape frequency tensor for broadcasting it with another tensor.
-
- This function reshapes the frequency tensor to have the same shape as the target tensor 'x'
- for the purpose of broadcasting the frequency tensor during element-wise operations.
-
- Args:
- freqs_cis (torch.Tensor): Frequency tensor to be reshaped.
- x (torch.Tensor): Target tensor for broadcasting compatibility.
-
- Returns:
- torch.Tensor: Reshaped frequency tensor.
-
- Raises:
- AssertionError: If the frequency tensor doesn't match the expected shape.
- AssertionError: If the target tensor 'x' doesn't have the expected number of dimensions.
- """
- ndim = x.ndim
- assert 0 <= 1 < ndim
- assert freqs_cis.shape == (x.shape[1], x.shape[-1])
- shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
- return freqs_cis.view(*shape)
-
-
-def apply_rotary_emb(
- xq: torch.Tensor,
- xk: torch.Tensor,
- freqs_cis: torch.Tensor,
-) -> Tuple[torch.Tensor, torch.Tensor]:
- """
- Apply rotary embeddings to input tensors using the given frequency tensor.
-
- This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided
- frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor
- is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are
- returned as real tensors.
-
- Args:
- xq (torch.Tensor): Query tensor to apply rotary embeddings.
- xk (torch.Tensor): Key tensor to apply rotary embeddings.
- freqs_cis (torch.Tensor): Precomputed frequency tensor for complex exponentials.
-
- Returns:
- Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings.
-
-
-
- """
- xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
- xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
- freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
- xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
- xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
- return xq_out.type_as(xq), xk_out.type_as(xk)
-
-
-def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
- """torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
- bs, slen, n_kv_heads, head_dim = x.shape
- if n_rep == 1:
- return x
- return (
- x[:, :, :, None, :]
- .expand(bs, slen, n_kv_heads, n_rep, head_dim)
- .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
- )
-
-
-class Attention(nn.Module):
- """Multi-head attention module."""
- def __init__(self, args: ModelArgs):
- """
- Initialize the Attention module.
-
- Args:
- args (ModelArgs): Model configuration parameters.
-
- Attributes:
- n_kv_heads (int): Number of key and value heads.
- n_local_heads (int): Number of local query heads.
- n_local_kv_heads (int): Number of local key and value heads.
- n_rep (int): Number of repetitions for local heads.
- head_dim (int): Dimension size of each attention head.
- wq (ColumnParallelLinear): Linear transformation for queries.
- wk (ColumnParallelLinear): Linear transformation for keys.
- wv (ColumnParallelLinear): Linear transformation for values.
- wo (RowParallelLinear): Linear transformation for output.
- cache_k (torch.Tensor): Cached keys for attention.
- cache_v (torch.Tensor): Cached values for attention.
-
- """
- super().__init__()
- self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
- model_parallel_size = fs_init.get_model_parallel_world_size()
- self.n_local_heads = args.n_heads // model_parallel_size
- self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
- self.n_rep = self.n_local_heads // self.n_local_kv_heads
- self.head_dim = args.dim // args.n_heads
-
- self.wq = ColumnParallelLinear(
- args.dim,
- args.n_heads * self.head_dim,
- bias=False,
- gather_output=False,
- init_method=lambda x: x,
- )
- self.wk = ColumnParallelLinear(
- args.dim,
- self.n_kv_heads * self.head_dim,
- bias=False,
- gather_output=False,
- init_method=lambda x: x,
- )
- self.wv = ColumnParallelLinear(
- args.dim,
- self.n_kv_heads * self.head_dim,
- bias=False,
- gather_output=False,
- init_method=lambda x: x,
- )
- self.wo = RowParallelLinear(
- args.n_heads * self.head_dim,
- args.dim,
- bias=False,
- input_is_parallel=True,
- init_method=lambda x: x,
- )
-
- self.cache_k = torch.zeros(
- (
- args.max_batch_size,
- args.max_seq_len,
- self.n_local_kv_heads,
- self.head_dim,
- )
- ).cuda()
- self.cache_v = torch.zeros(
- (
- args.max_batch_size,
- args.max_seq_len,
- self.n_local_kv_heads,
- self.head_dim,
- )
- ).cuda()
-
- def forward(
- self,
- x: torch.Tensor,
- start_pos: int,
- freqs_cis: torch.Tensor,
- mask: Optional[torch.Tensor],
- ):
- """
- Forward pass of the attention module.
-
- Args:
- x (torch.Tensor): Input tensor.
- start_pos (int): Starting position for caching.
- freqs_cis (torch.Tensor): Precomputed frequency tensor.
- mask (torch.Tensor, optional): Attention mask tensor.
-
- Returns:
- torch.Tensor: Output tensor after attention.
-
- """
- bsz, seqlen, _ = x.shape
- xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
-
- xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
- xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
- xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
-
- xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
-
- self.cache_k = self.cache_k.to(xq)
- self.cache_v = self.cache_v.to(xq)
-
- self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
- self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
-
- keys = self.cache_k[:bsz, : start_pos + seqlen]
- values = self.cache_v[:bsz, : start_pos + seqlen]
-
- # repeat k/v heads if n_kv_heads < n_heads
- keys = repeat_kv(keys, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)
- values = repeat_kv(values, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)
-
- xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)
- keys = keys.transpose(1, 2)
- values = values.transpose(1, 2)
- scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)
- if mask is not None:
- scores = scores + mask # (bs, n_local_heads, seqlen, cache_len + seqlen)
- scores = F.softmax(scores.float(), dim=-1).type_as(xq)
- output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim)
- output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
- return self.wo(output)
-
-
-class FeedForward(nn.Module):
- def __init__(
- self,
- dim: int,
- hidden_dim: int,
- multiple_of: int,
- ffn_dim_multiplier: Optional[float],
- ):
- """
- Initialize the FeedForward module.
-
- Args:
- dim (int): Input dimension.
- hidden_dim (int): Hidden dimension of the feedforward layer.
- multiple_of (int): Value to ensure hidden dimension is a multiple of this value.
- ffn_dim_multiplier (float, optional): Custom multiplier for hidden dimension. Defaults to None.
-
- Attributes:
- w1 (ColumnParallelLinear): Linear transformation for the first layer.
- w2 (RowParallelLinear): Linear transformation for the second layer.
- w3 (ColumnParallelLinear): Linear transformation for the third layer.
-
- """
- super().__init__()
- hidden_dim = int(2 * hidden_dim / 3)
- # custom dim factor multiplier
- if ffn_dim_multiplier is not None:
- hidden_dim = int(ffn_dim_multiplier * hidden_dim)
- hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
-
- self.w1 = ColumnParallelLinear(
- dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
- )
- self.w2 = RowParallelLinear(
- hidden_dim, dim, bias=False, input_is_parallel=True, init_method=lambda x: x
- )
- self.w3 = ColumnParallelLinear(
- dim, hidden_dim, bias=False, gather_output=False, init_method=lambda x: x
- )
-
- def forward(self, x):
- return self.w2(F.silu(self.w1(x)) * self.w3(x))
-
-
-class TransformerBlock(nn.Module):
- def __init__(self, layer_id: int, args: ModelArgs):
- """
- Initialize a TransformerBlock.
-
- Args:
- layer_id (int): Identifier for the layer.
- args (ModelArgs): Model configuration parameters.
-
- Attributes:
- n_heads (int): Number of attention heads.
- dim (int): Dimension size of the model.
- head_dim (int): Dimension size of each attention head.
- attention (Attention): Attention module.
- feed_forward (FeedForward): FeedForward module.
- layer_id (int): Identifier for the layer.
- attention_norm (RMSNorm): Layer normalization for attention output.
- ffn_norm (RMSNorm): Layer normalization for feedforward output.
-
- """
- super().__init__()
- self.n_heads = args.n_heads
- self.dim = args.dim
- self.head_dim = args.dim // args.n_heads
- self.attention = Attention(args)
- self.feed_forward = FeedForward(
- dim=args.dim,
- hidden_dim=4 * args.dim,
- multiple_of=args.multiple_of,
- ffn_dim_multiplier=args.ffn_dim_multiplier,
- )
- self.layer_id = layer_id
- self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
- self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
-
- def forward(
- self,
- x: torch.Tensor,
- start_pos: int,
- freqs_cis: torch.Tensor,
- mask: Optional[torch.Tensor],
- ):
- """
- Perform a forward pass through the TransformerBlock.
-
- Args:
- x (torch.Tensor): Input tensor.
- start_pos (int): Starting position for attention caching.
- freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.
- mask (torch.Tensor, optional): Masking tensor for attention. Defaults to None.
-
- Returns:
- torch.Tensor: Output tensor after applying attention and feedforward layers.
-
- """
- h = x + self.attention.forward(
- self.attention_norm(x), start_pos, freqs_cis, mask
- )
- out = h + self.feed_forward.forward(self.ffn_norm(h))
- return out
-
-
-class Transformer(nn.Module):
- def __init__(self, params: ModelArgs):
- """
- Initialize a Transformer model.
-
- Args:
- params (ModelArgs): Model configuration parameters.
-
- Attributes:
- params (ModelArgs): Model configuration parameters.
- vocab_size (int): Vocabulary size.
- n_layers (int): Number of layers in the model.
- tok_embeddings (ParallelEmbedding): Token embeddings.
- layers (torch.nn.ModuleList): List of Transformer blocks.
- norm (RMSNorm): Layer normalization for the model output.
- output (ColumnParallelLinear): Linear layer for final output.
- freqs_cis (torch.Tensor): Precomputed cosine and sine frequencies.
-
- """
- super().__init__()
- self.params = params
- self.vocab_size = params.vocab_size
- self.n_layers = params.n_layers
-
- self.tok_embeddings = ParallelEmbedding(
- params.vocab_size, params.dim, init_method=lambda x: x
- )
-
- self.layers = torch.nn.ModuleList()
- for layer_id in range(params.n_layers):
- self.layers.append(TransformerBlock(layer_id, params))
-
- self.norm = RMSNorm(params.dim, eps=params.norm_eps)
- self.output = ColumnParallelLinear(
- params.dim, params.vocab_size, bias=False, init_method=lambda x: x
- )
-
- self.freqs_cis = precompute_freqs_cis(
- self.params.dim // self.params.n_heads, self.params.max_seq_len * 2
- )
-
- @torch.inference_mode()
- def forward(self, tokens: torch.Tensor, start_pos: int):
- """
- Perform a forward pass through the Transformer model.
-
- Args:
- tokens (torch.Tensor): Input token indices.
- start_pos (int): Starting position for attention caching.
-
- Returns:
- torch.Tensor: Output logits after applying the Transformer model.
-
- """
- _bsz, seqlen = tokens.shape
- h = self.tok_embeddings(tokens)
- self.freqs_cis = self.freqs_cis.to(h.device)
- freqs_cis = self.freqs_cis[start_pos : start_pos + seqlen]
-
- mask = None
- if seqlen > 1:
- mask = torch.full(
- (1, 1, seqlen, seqlen), float("-inf"), device=tokens.device
- )
- mask = torch.triu(mask, diagonal=start_pos + 1).type_as(h)
-
- for layer in self.layers:
- h = layer(h, start_pos, freqs_cis, mask)
- h = self.norm(h)
- output = self.output(h).float()
- return output
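
The rotary-embedding helpers in this file (`precompute_freqs_cis`, `apply_rotary_emb`) are plain PyTorch and can be exercised without the fairscale parallel layers. A minimal shape- and norm-check sketch, assuming the two functions are importable from the module above (the `model` import path is an assumption):

```python
# Hedged sketch: exercises only the torch-only RoPE helpers defined above.
import torch
from model import precompute_freqs_cis, apply_rotary_emb  # assumed import path

bsz, seqlen, n_heads, head_dim = 2, 8, 4, 64

# One complex rotation per (position, frequency pair): shape (seqlen, head_dim // 2).
freqs_cis = precompute_freqs_cis(head_dim, seqlen)

xq = torch.randn(bsz, seqlen, n_heads, head_dim)
xk = torch.randn(bsz, seqlen, n_heads, head_dim)

xq_rot, xk_rot = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
assert xq_rot.shape == xq.shape and xk_rot.shape == xk.shape

# The rotation is norm-preserving on each 2-D frequency pair, hence on the whole head vector.
assert torch.allclose(xq_rot.norm(dim=-1), xq.norm(dim=-1), atol=1e-5)
```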
diff --git a/spaces/Metatron/LEO/Dockerfile b/spaces/Metatron/LEO/Dockerfile
deleted file mode 100644
index 29ec24bfb63cdbf2c92fc41c33e24b329aa6e1ca..0000000000000000000000000000000000000000
--- a/spaces/Metatron/LEO/Dockerfile
+++ /dev/null
@@ -1,65 +0,0 @@
-FROM zenmldocker/zenml-server:latest
-
-ENV ZENML_ANALYTICS_OPT_IN=true
-ENV ZENML_SERVER_DEPLOYMENT_TYPE="hf_spaces"
-ENV ZENML_LOGGING_VERBOSITY=DEBUG
-
-################################################################################
-#
-# CONFIGURING YOUR ZENML HF SPACES SERVER
-# ---------------------------------------
-# By default this space is not persistent. All ZenML metadata is stored in
-# localstorage in a SQLite database. If you would like to make your storage
-# persistent, use the appropriate environment variables below to configure the
-# image to use a MySQL-compatible database service that is reachable from the
-# container. See https://docs.zenml.io/getting-started/deploying-zenml/docker
-# for more information on how to configure these environment variables.
-
-# You can also configure the secrets store to use for your ZenML server. Be
-# sure to use Huggingface Spaces' 'Repository Secrets' feature to store any
-# secrets referenced here. See
-# https://huggingface.co/docs/hub/spaces-overview#managing-secrets for more
-# information on how to configure these environment variables.
-
-# ENV ZENML_DEFAULT_PROJECT_NAME=""
-# ENV ZENML_DEFAULT_USER_NAME=""
-# ENV ZENML_DEFAULT_USER_PASSWORD=""
-# ENV ZENML_STORE_URL=""
-# ENV ZENML_STORE_SSL_CA=""
-# ENV ZENML_STORE_SSL_CERT=""
-# ENV ZENML_STORE_SSL_KEY=""
-# ENV ZENML_STORE_SSL_VERIFY_SERVER_CERT=""
-
-# ENV ZENML_LOGGING_VERBOSITY=""
-
-# # SECRETS STORE CONFIGURATION
-# ENV ZENML_SECRETS_STORE_TYPE=""
-# ENV ZENML_SECRETS_STORE_ENCRYPTION_KEY=""
-# ENV ZENML_SECRETS_STORE_CLASS_PATH=""
-# ENV ZENML_JWT_SECRET_KEY=""
-
-# # AWS Secrets Store Configuration
-# ENV ZENML_SECRETS_STORE_REGION_NAME=""
-# ENV ZENML_SECRETS_STORE_AWS_ACCESS_KEY_ID=""
-# ENV ZENML_SECRETS_STORE_AWS_SECRET_ACCESS_KEY=""
-# ENV ZENML_SECRETS_STORE_AWS_SESSION_TOKEN=""
-# ENV ZENML_SECRETS_STORE_SECRET_LIST_REFRESH_TIMEOUT=""
-
-# # GCP Secrets Store Configuration
-# ENV ZENML_SECRETS_STORE_PROJECT_ID=""
-# ENV GOOGLE_APPLICATION_CREDENTIALS=""
-
-# # Azure Secrets Store Configuration
-# ENV ZENML_SECRETS_STORE_KEY_VAULT_NAME=""
-# ENV ZENML_SECRETS_STORE_AZURE_CLIENT_ID=""
-# ENV ZENML_SECRETS_STORE_AZURE_CLIENT_SECRET=""
-# ENV ZENML_SECRETS_STORE_AZURE_TENANT_ID=""
-
-# # Hashicorp Secrets Store Configuration
-# ENV ZENML_SECRETS_STORE_VAULT_ADDR=""
-# ENV ZENML_SECRETS_STORE_VAULT_TOKEN=""
-# ENV ZENML_SECRETS_STORE_VAULT_NAMESPACE=""
-# ENV ZENML_SECRETS_STORE_MAX_VERSIONS=""
-
-ENTRYPOINT ["uvicorn", "zenml.zen_server.zen_server_api:app", "--log-level", "debug"]
-CMD ["--proxy-headers", "--port", "8080", "--host", "0.0.0.0"]
diff --git a/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/__init__.py b/spaces/MisterZee/PIFu-Clothed-Human-Digitization/PIFu/lib/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Mountchicken/MAERec-Gradio/tools/dist_test.sh b/spaces/Mountchicken/MAERec-Gradio/tools/dist_test.sh
deleted file mode 100644
index dea131b43ea8f1222661d20603d40c18ea7f28a1..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/tools/dist_test.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env bash
-
-CONFIG=$1
-CHECKPOINT=$2
-GPUS=$3
-NNODES=${NNODES:-1}
-NODE_RANK=${NODE_RANK:-0}
-PORT=${PORT:-29500}
-MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
-
-PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
-python -m torch.distributed.launch \
- --nnodes=$NNODES \
- --node_rank=$NODE_RANK \
- --master_addr=$MASTER_ADDR \
- --nproc_per_node=$GPUS \
- --master_port=$PORT \
- $(dirname "$0")/test.py \
- $CONFIG \
- $CHECKPOINT \
- --launcher pytorch \
- ${@:4}
diff --git a/spaces/MrSinan/Reconstruction/fit_ellipse.py b/spaces/MrSinan/Reconstruction/fit_ellipse.py
deleted file mode 100644
index c17e1201bbb3ab8ae5484a417e657abac194d2fc..0000000000000000000000000000000000000000
--- a/spaces/MrSinan/Reconstruction/fit_ellipse.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Author: aqeelanwar
-# Created: 4 May,2020, 1:30 AM
-# Email: aqeel.anwar@gatech.edu
-
-import numpy as np
-from numpy.linalg import eig, inv
-
-def fitEllipse(x,y):
- x = x[:,np.newaxis]
- y = y[:,np.newaxis]
- D = np.hstack((x*x, x*y, y*y, x, y, np.ones_like(x)))
- S = np.dot(D.T,D)
- C = np.zeros([6,6])
- C[0,2] = C[2,0] = 2; C[1,1] = -1
- E, V = eig(np.dot(inv(S), C))
- n = np.argmax(np.abs(E))
- a = V[:,n]
- return a
-
-def ellipse_center(a):
- b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
- num = b*b-a*c
- x0=(c*d-b*f)/num
- y0=(a*f-b*d)/num
- return np.array([x0,y0])
-
-
-def ellipse_angle_of_rotation( a ):
- b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
- return 0.5*np.arctan(2*b/(a-c))
-
-
-def ellipse_axis_length( a ):
- b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
- up = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)
- down1=(b*b-a*c)*( (c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
- down2=(b*b-a*c)*( (a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))
- res1=np.sqrt(up/down1)
- res2=np.sqrt(up/down2)
- return np.array([res1, res2])
-
-def ellipse_angle_of_rotation2( a ):
- b,c,d,f,g,a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0]
- if b == 0:
- if a > c:
- return 0
- else:
- return np.pi/2
- else:
- if a > c:
- return np.arctan(2*b/(a-c))/2
- else:
- return np.pi/2 + np.arctan(2*b/(a-c))/2
-
-# a = fitEllipse(x,y)
-# center = ellipse_center(a)
-# #phi = ellipse_angle_of_rotation(a)
-# phi = ellipse_angle_of_rotation2(a)
-# axes = ellipse_axis_length(a)
-#
-# print("center = ", center)
-# print("angle of rotation = ", phi)
-# print("axes = ", axes)
-
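
The commented-out usage at the bottom of `fit_ellipse.py` assumes `x` and `y` arrays already exist. A runnable sketch of the same flow on synthetic data, assuming the module above is importable as `fit_ellipse` (the ellipse parameters below are hypothetical, purely for illustration):

```python
# Hedged sketch: drives the functions above with synthetic ellipse points.
import numpy as np
from fit_ellipse import fitEllipse, ellipse_center, ellipse_angle_of_rotation2, ellipse_axis_length

# Synthetic ellipse: center (2, -1), semi-axes (3, 1), rotated by 30 degrees.
t = np.linspace(0, 2 * np.pi, 100)
phi_true = np.deg2rad(30)
x = 2 + 3 * np.cos(t) * np.cos(phi_true) - 1 * np.sin(t) * np.sin(phi_true)
y = -1 + 3 * np.cos(t) * np.sin(phi_true) + 1 * np.sin(t) * np.cos(phi_true)

a = fitEllipse(x, y)
print("center =", ellipse_center(a))             # should recover roughly (2, -1)
print("angle of rotation =", ellipse_angle_of_rotation2(a))
print("axes =", ellipse_axis_length(a))          # semi-axis lengths, roughly 3 and 1
```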
diff --git a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/box_coder.py b/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/box_coder.py
deleted file mode 100644
index f20ac956dfbce1fa69d1b9e6f5b023b704e1ec8a..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/vision/detection/utils/object_detection/box_coder.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Base box coder.
-
-Box coders convert between coordinate frames, namely image-centric
-(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
-defined by a specific anchor).
-
-Users of a BoxCoder can call two methods:
- encode: which encodes a box with respect to a given anchor
- (or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
- decode: which inverts this encoding with a decode operation.
-In both cases, the arguments are assumed to be in 1-1 correspondence already;
-it is not the job of a BoxCoder to perform matching.
-"""
-from abc import ABCMeta
-from abc import abstractmethod
-from abc import abstractproperty
-
-import tensorflow as tf
-
-
-# Box coder types.
-FASTER_RCNN = 'faster_rcnn'
-KEYPOINT = 'keypoint'
-MEAN_STDDEV = 'mean_stddev'
-SQUARE = 'square'
-
-
-class BoxCoder(object):
- """Abstract base class for box coder."""
- __metaclass__ = ABCMeta
-
- @abstractproperty
- def code_size(self):
- """Return the size of each code.
-
- This number is a constant and should agree with the output of the `encode`
- op (e.g. if rel_codes is the output of self.encode(...), then it should have
- shape [N, code_size()]). This abstractproperty should be overridden by
- implementations.
-
- Returns:
- an integer constant
- """
- pass
-
- def encode(self, boxes, anchors):
- """Encode a box list relative to an anchor collection.
-
- Args:
- boxes: BoxList holding N boxes to be encoded
- anchors: BoxList of N anchors
-
- Returns:
- a tensor representing N relative-encoded boxes
- """
- with tf.name_scope('Encode'):
- return self._encode(boxes, anchors)
-
- def decode(self, rel_codes, anchors):
- """Decode boxes that are encoded relative to an anchor collection.
-
- Args:
- rel_codes: a tensor representing N relative-encoded boxes
- anchors: BoxList of anchors
-
- Returns:
- boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
- with corners y_min, x_min, y_max, x_max)
- """
- with tf.name_scope('Decode'):
- return self._decode(rel_codes, anchors)
-
- @abstractmethod
- def _encode(self, boxes, anchors):
- """Method to be overriden by implementations.
-
- Args:
- boxes: BoxList holding N boxes to be encoded
- anchors: BoxList of N anchors
-
- Returns:
- a tensor representing N relative-encoded boxes
- """
- pass
-
- @abstractmethod
- def _decode(self, rel_codes, anchors):
- """Method to be overriden by implementations.
-
- Args:
- rel_codes: a tensor representing N relative-encoded boxes
- anchors: BoxList of anchors
-
- Returns:
- boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
- with corners y_min, x_min, y_max, x_max)
- """
- pass
-
-
-def batch_decode(encoded_boxes, box_coder, anchors):
- """Decode a batch of encoded boxes.
-
- This op takes a batch of encoded bounding boxes and transforms
- them to a batch of bounding boxes specified by their corners in
- the order of [y_min, x_min, y_max, x_max].
-
- Args:
- encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
- code_size] representing the location of the objects.
- box_coder: a BoxCoder object.
- anchors: a BoxList of anchors used to encode `encoded_boxes`.
-
- Returns:
- decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
- coder_size] representing the corners of the objects in the order
- of [y_min, x_min, y_max, x_max].
-
- Raises:
- ValueError: if batch sizes of the inputs are inconsistent, or if
- the number of anchors inferred from encoded_boxes and anchors are
- inconsistent.
- """
- encoded_boxes.get_shape().assert_has_rank(3)
- if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
- raise ValueError('The number of anchors inferred from encoded_boxes'
- ' and anchors are inconsistent: shape[1] of encoded_boxes'
- ' %s should be equal to the number of anchors: %s.' %
- (encoded_boxes.get_shape()[1].value,
- anchors.num_boxes_static()))
-
- decoded_boxes = tf.stack([
- box_coder.decode(boxes, anchors).get()
- for boxes in tf.unstack(encoded_boxes)
- ])
- return decoded_boxes
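The class above only pins down the encode/decode contract; the concrete coders named in the constants (faster_rcnn, keypoint, mean_stddev, square) live elsewhere in the repo. A hedged toy subclass, using plain [N, 4] tensors instead of BoxList objects for brevity, just to illustrate that contract:

```python
import tensorflow as tf
# Assumes the BoxCoder base class from the module above is in scope.

class OffsetBoxCoder(BoxCoder):
    """Toy coder: the code is simply the corner-wise offset from the anchor."""

    @property
    def code_size(self):
        return 4

    def _encode(self, boxes, anchors):
        return boxes - anchors          # [N, 4] relative codes

    def _decode(self, rel_codes, anchors):
        return rel_codes + anchors      # invert the encoding

boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]])
anchors = tf.constant([[0.0, 0.0, 0.4, 0.4]])
coder = OffsetBoxCoder()
codes = coder.encode(boxes, anchors)    # [[0.1, 0.1, 0.1, 0.1]]
print(coder.decode(codes, anchors))     # recovers the original boxes
```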
diff --git a/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/eval.py b/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/eval.py
deleted file mode 100644
index ec68ad50bc25cd8528f4e9fd7976adad72782641..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/attention_ocr/python/eval.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright 2017 The TensorFlow Authors All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-"""Script to evaluate a trained Attention OCR model.
-
-A simple usage example:
-python eval.py
-"""
-import tensorflow as tf
-from tensorflow.contrib import slim
-from tensorflow import app
-from tensorflow.python.platform import flags
-
-import data_provider
-import common_flags
-
-FLAGS = flags.FLAGS
-common_flags.define()
-
-# yapf: disable
-flags.DEFINE_integer('num_batches', 100,
- 'Number of batches to run eval for.')
-
-flags.DEFINE_string('eval_log_dir', '/tmp/attention_ocr/eval',
- 'Directory where the evaluation results are saved to.')
-
-flags.DEFINE_integer('eval_interval_secs', 60,
- 'Frequency in seconds to run evaluations.')
-
-flags.DEFINE_integer('number_of_steps', None,
- 'Number of times to run evaluation.')
-# yapf: enable
-
-
-def main(_):
- if not tf.gfile.Exists(FLAGS.eval_log_dir):
- tf.gfile.MakeDirs(FLAGS.eval_log_dir)
-
- dataset = common_flags.create_dataset(split_name=FLAGS.split_name)
- model = common_flags.create_model(dataset.num_char_classes,
- dataset.max_sequence_length,
- dataset.num_of_views, dataset.null_code)
- data = data_provider.get_data(
- dataset,
- FLAGS.batch_size,
- augment=False,
- central_crop_size=common_flags.get_crop_size())
- endpoints = model.create_base(data.images, labels_one_hot=None)
- model.create_loss(data, endpoints)
- eval_ops = model.create_summaries(
- data, endpoints, dataset.charset, is_training=False)
- slim.get_or_create_global_step()
- session_config = tf.ConfigProto(device_count={"GPU": 0})
- slim.evaluation.evaluation_loop(
- master=FLAGS.master,
- checkpoint_dir=FLAGS.train_log_dir,
- logdir=FLAGS.eval_log_dir,
- eval_op=eval_ops,
- num_evals=FLAGS.num_batches,
- eval_interval_secs=FLAGS.eval_interval_secs,
- max_number_of_evaluations=FLAGS.number_of_steps,
- session_config=session_config)
-
-
-if __name__ == '__main__':
- app.run()
diff --git a/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/preprocessing/__init__.py b/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/preprocessing/__init__.py
deleted file mode 100644
index 8b137891791fe96927ad78e64b0aad7bded08bdc..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/research/cognitive_planning/preprocessing/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/spaces/NSect/VALL-E-X/utils/g2p/symbols.py b/spaces/NSect/VALL-E-X/utils/g2p/symbols.py
deleted file mode 100644
index 789e9df25d3d93d1976ef22d15d77f51d170ed00..0000000000000000000000000000000000000000
--- a/spaces/NSect/VALL-E-X/utils/g2p/symbols.py
+++ /dev/null
@@ -1,76 +0,0 @@
-'''
-Defines the set of symbols used in text input to the model.
-'''
-
-# japanese_cleaners
-# _pad = '_'
-# _punctuation = ',.!?-'
-# _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
-
-
-'''# japanese_cleaners2
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
-'''
-
-
-'''# korean_cleaners
-_pad = '_'
-_punctuation = ',.!?…~'
-_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
-'''
-
-'''# chinese_cleaners
-_pad = '_'
-_punctuation = ',。!?—…'
-_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
-'''
-
-# # zh_ja_mixture_cleaners
-# _pad = '_'
-# _punctuation = ',.!?-~…'
-# _letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
-
-
-'''# sanskrit_cleaners
-_pad = '_'
-_punctuation = '।'
-_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ '
-'''
-
-'''# cjks_cleaners
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ '
-'''
-
-'''# thai_cleaners
-_pad = '_'
-_punctuation = '.!? '
-_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์'
-'''
-
-# # cjke_cleaners2
-_pad = '_'
-_punctuation = ',.!?-~…'
-_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ '
-
-
-'''# shanghainese_cleaners
-_pad = '_'
-_punctuation = ',.!?…'
-_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 '
-'''
-
-'''# chinese_dialect_cleaners
-_pad = '_'
-_punctuation = ',.!?~…─'
-_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ '
-'''
-
-# Export all symbols:
-symbols = [_pad] + list(_punctuation) + list(_letters)
-
-# Special symbol ids
-SPACE_ID = symbols.index(" ")
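A hedged sketch (not part of the original file) of how the exported `symbols` list and `SPACE_ID` are typically consumed downstream, mapping already-cleaned text to integer ids:

```python
# Import path taken from the file location above; adjust if the package differs.
from utils.g2p.symbols import symbols, SPACE_ID

_symbol_to_id = {s: i for i, s in enumerate(symbols)}

def cleaned_text_to_ids(cleaned_text):
    # Drop any character that is not in the symbol set.
    return [_symbol_to_id[ch] for ch in cleaned_text if ch in _symbol_to_id]

print(cleaned_text_to_ids("nihao, sekai."))
print(SPACE_ID == _symbol_to_id[" "])   # True: there is only one space in the symbol list
```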
diff --git a/spaces/NiuTaipu/moe-tts-test01/monotonic_align/core.py b/spaces/NiuTaipu/moe-tts-test01/monotonic_align/core.py
deleted file mode 100644
index 1f940605fe4fd0738fa0006149fcba14ef88223a..0000000000000000000000000000000000000000
--- a/spaces/NiuTaipu/moe-tts-test01/monotonic_align/core.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import numba
-
-
-@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]),
- nopython=True, nogil=True)
-def maximum_path_jit(paths, values, t_ys, t_xs):
- b = paths.shape[0]
- max_neg_val = -1e9
- for i in range(int(b)):
- path = paths[i]
- value = values[i]
- t_y = t_ys[i]
- t_x = t_xs[i]
-
- v_prev = v_cur = 0.0
- index = t_x - 1
-
- for y in range(t_y):
- for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
- if x == y:
- v_cur = max_neg_val
- else:
- v_cur = value[y - 1, x]
- if x == 0:
- if y == 0:
- v_prev = 0.
- else:
- v_prev = max_neg_val
- else:
- v_prev = value[y - 1, x - 1]
- value[y, x] += max(v_prev, v_cur)
-
- for y in range(t_y - 1, -1, -1):
- path[y, index] = 1
- if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
- index = index - 1
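maximum_path_jit fills `paths` in place with a hard monotonic alignment: the forward loops accumulate the best score into `value`, and the backward loop writes one column index per row. A hedged usage sketch (the axis semantics are an assumption here; in VITS-style models the two axes are the lengths of the sequences being aligned):

```python
import numpy as np
from monotonic_align.core import maximum_path_jit  # path taken from the repo layout above

b, t_y, t_x = 1, 6, 4                              # batch, rows, columns (t_y >= t_x)
values = np.random.randn(b, t_y, t_x).astype(np.float32)
paths = np.zeros((b, t_y, t_x), dtype=np.int32)
t_ys = np.array([t_y], dtype=np.int32)             # per-sample row lengths
t_xs = np.array([t_x], dtype=np.int32)             # per-sample column lengths

maximum_path_jit(paths, values, t_ys, t_xs)
print(paths[0])   # exactly one 1 per row, column index non-decreasing down the rows
```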
diff --git a/spaces/Norod78/ComicsHero/app.py b/spaces/Norod78/ComicsHero/app.py
deleted file mode 100644
index 134e29a5d187d7e669ae0ecd5748aad68bc2389e..0000000000000000000000000000000000000000
--- a/spaces/Norod78/ComicsHero/app.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import os
-os.system("pip install dlib")
-import sys
-import face_detection
-import PIL
-from PIL import Image, ImageOps
-import numpy as np
-
-import torch
-torch.set_grad_enabled(False)
-net = torch.jit.load('ComicsHeroesReduced_v2E03_Traced_Script_CPU.pt')
-net.eval()
-
-
-def tensor2im(var):
- var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
- var = ((var + 1) / 2)
- var[var < 0] = 0
- var[var > 1] = 1
- var = var * 255
- return Image.fromarray(var.astype('uint8'))
-
-def image_as_array(image_in):
- im_array = np.array(image_in, np.float32)
- im_array = (im_array/255)*2 - 1
- im_array = np.transpose(im_array, (2, 0, 1))
- im_array = np.expand_dims(im_array, 0)
- return im_array
-
-def find_aligned_face(image_in, size=512):
- aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
- return aligned_image, n_faces, quad
-
-def align_first_face(image_in, size=512):
- aligned_image, n_faces, quad = find_aligned_face(image_in,size=size)
- if n_faces == 0:
- image_in = image_in.resize((size, size))
- im_array = image_as_array(image_in)
- else:
- im_array = image_as_array(aligned_image)
-
- return im_array
-
-def img_concat_h(im1, im2):
- dst = Image.new('RGB', (im1.width + im2.width, im1.height))
- dst.paste(im1, (0, 0))
- dst.paste(im2, (im1.width, 0))
- return dst
-
-import gradio as gr
-
-def face2hero(
- img: Image.Image,
- size: int
-) -> Image.Image:
-
- aligned_img = align_first_face(img)
- if aligned_img is None:
- output=None
- else:
- input = torch.Tensor(aligned_img)
- output = net(input)
- output = tensor2im(output[0])
- output = img_concat_h(tensor2im(torch.Tensor(aligned_img)[0]), output)
-
- return output
-
-import os
-import collections
-from typing import Union, List
-import numpy as np
-from PIL import Image
-import PIL.Image
-import PIL.ImageFile
-import numpy as np
-import scipy.ndimage
-import requests
-
-def inference(img):
- out = face2hero(img, 512)
- return out
-
-
-title = "Comics hero"
-description = "Turn a face into the face of a \"Comics hero\". Upload an image with a face, or click on one of the examples below. If a face could not be detected, an image will still be created."
-article = "
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/atticus/image-text-retrival-huster/misc/model.py b/spaces/atticus/image-text-retrival-huster/misc/model.py
deleted file mode 100644
index 9a6cc59390c47c7223d434666c53a42713b97f83..0000000000000000000000000000000000000000
--- a/spaces/atticus/image-text-retrival-huster/misc/model.py
+++ /dev/null
@@ -1,128 +0,0 @@
-"""
-****************** COPYRIGHT AND CONFIDENTIALITY INFORMATION ******************
-Copyright (c) 2018 [Thomson Licensing]
-All Rights Reserved
-This program contains proprietary information which is a trade secret/business \
-secret of [Thomson Licensing] and is protected, even if unpublished, under \
-applicable Copyright laws (including French droit d'auteur) and/or may be \
-subject to one or more patent(s).
-Recipient is to retain this program in confidence and is not permitted to use \
-or make copies thereof other than as permitted in a written agreement with \
-[Thomson Licensing] unless otherwise expressly allowed by applicable laws or \
-by [Thomson Licensing] under express agreement.
-Thomson Licensing is a company of the group TECHNICOLOR
-*******************************************************************************
-This scripts permits one to reproduce training and experiments of:
- Engilberge, M., Chevallier, L., Pérez, P., & Cord, M. (2018, April).
- Finding beans in burgers: Deep semantic-visual embedding with localization.
- In Proceedings of CVPR (pp. 3984-3993)
-
-Author: Martin Engilberge
-"""
-
-import torch
-import torch.nn as nn
-
-from misc.config import path
-from misc.weldonModel import ResNet_weldon
-from sru import SRU
-
-
-class SruEmb(nn.Module):
- def __init__(self, nb_layer, dim_in, dim_out, dropout=0.25):
- super(SruEmb, self).__init__()
-
- self.dim_out = dim_out
- # SRU is used as the text feature extractor
- self.rnn = SRU(dim_in, dim_out, num_layers=nb_layer,
- dropout=dropout, rnn_dropout=dropout,
- use_tanh=True, has_skip_term=True,
- v1=True, rescale=False)
-
- def _select_last(self, x, lengths):
- batch_size = x.size(0)
- mask = x.data.new().resize_as_(x.data).fill_(0)
- for i in range(batch_size):
- mask[i][lengths[i] - 1].fill_(1)
- x = x.mul(mask)
- x = x.sum(1, keepdim=True).view(batch_size, self.dim_out)
- return x
-
- def _process_lengths(self, input):
- max_length = input.size(1)
- # Get the length of each text sequence
- lengths = list(
- max_length - input.data.eq(0).sum(1, keepdim=True).squeeze())
- return lengths
-
- def forward(self, input, lengths=None):
- if lengths is None:
- lengths = self._process_lengths(input)
- x = input.permute(1, 0, 2)
- # rnn
- x, hn = self.rnn(x)
- x = x.permute(1, 0, 2)
- if lengths:
- # Use the mask to zero out the padded positions
- x = self._select_last(x, lengths)
- return x
-
-
-class img_embedding(nn.Module):
-
- def __init__(self, args):
- super(img_embedding, self).__init__()
- # Image backbone: ResNet-152
- model_weldon2 = ResNet_weldon(args, pretrained=False, weldon_pretrained_path=path["WELDON_CLASSIF_PRETRAINED"])
-
- self.base_layer = nn.Sequential(*list(model_weldon2.children())[:-1])
-
- # Freeze gradients on the image side
- for param in self.base_layer.parameters():
- param.requires_grad = False
-
- def forward(self, x):
- x = self.base_layer(x)
- x = x.view(x.size()[0], -1)
-
- return x
-
- # Image activation map
- def get_activation_map(self, x):
- x = self.base_layer[0](x)
- act_map = self.base_layer[1](x)
- act = self.base_layer[2](act_map)
- return act, act_map
-
-
-class joint_embedding(nn.Module):
-
- def __init__(self, args):
- super(joint_embedding, self).__init__()
- # Image encoder
- self.img_emb = torch.nn.DataParallel(img_embedding(args))
- # Caption encoder
- self.cap_emb = SruEmb(args.sru, 620, args.dimemb)
- # Fully-connected layer
- self.fc = torch.nn.DataParallel(nn.Linear(2400, args.dimemb, bias=True))
- # Dropout layer
- self.dropout = torch.nn.Dropout(p=0.5)
-
- def forward(self, imgs, caps, lengths):
- # Image side
- if imgs is not None:
- x_imgs = self.img_emb(imgs)
- x_imgs = self.dropout(x_imgs)
- x_imgs = self.fc(x_imgs)
- x_imgs = x_imgs / torch.norm(x_imgs, 2, dim=1, keepdim=True).expand_as(x_imgs)
- else:
- x_imgs = None
-
- # Caption side
- if caps is not None:
- x_caps = self.cap_emb(caps, lengths=lengths)
- x_caps = x_caps / torch.norm(x_caps, 2, dim=1, keepdim=True).expand_as(x_caps)
- else:
- x_caps = None
-
- return x_imgs, x_caps
diff --git a/spaces/awacke1/MadLibs/app.py b/spaces/awacke1/MadLibs/app.py
deleted file mode 100644
index 246aef9595f306ccf40b907b6ec345620c15429e..0000000000000000000000000000000000000000
--- a/spaces/awacke1/MadLibs/app.py
+++ /dev/null
@@ -1,97 +0,0 @@
-
-import json
-import random
-import re
-import dataclasses
-
-import streamlit as st
-
-from gamestate import persistent_game_state
-
-st.markdown("""MAD LIBS
-(dataset is from [a Microsoft EMNLP paper](https://www.microsoft.com/en-us/download/details.aspx?id=55593))
-Fill in all the fields then click "Generate Story".
-""")
-
-
-with open('stories.json') as f:
- stories = json.load(f)
-
-
-@dataclasses.dataclass
-class GameState:
- story: str
- game_number: int = 0
-
-state = persistent_game_state(initial_state=GameState(random.choice(stories)))
-
-
-if st.button("new story"):
- state.story = random.choice(stories)
- state.game_number += 1
-
-pos = {
- 'cc': 'Coordinating conjunction',
- 'cd': 'Cardinal number',
- 'dt': 'Determiner',
- 'ex': 'Existential there',
- 'fw': 'Foreign word',
- 'in': 'Preposition or subordinating conjunction',
- 'jj': 'Adjective',
- 'jjr': 'Adjective, comparative',
- 'jjs': 'Adjective, superlative',
- 'ls': 'List item marker',
- 'md': 'Modal',
- 'nn': 'Noun, singular or mass',
- 'nns': 'Noun, plural',
- 'nnp': 'Proper noun, singular',
- 'nnps': 'Proper noun, plural',
- 'pdt': 'Predeterminer',
- 'pos': 'Possessive ending',
- 'prp': 'Personal pronoun',
- 'prp$': 'Possessive pronoun',
- 'rb': 'Adverb',
- 'rbr': 'Adverb, comparative',
- 'rbs': 'Adverb, superlative',
- 'rp': 'Particle',
- 'sym': 'Symbol',
- 'to': 'to',
- 'uh': 'Interjection',
- 'vb': 'Verb, base form',
- 'vbd': 'Verb, past tense',
- 'vbg': 'Verb, gerund or present participle',
- 'vbn': 'Verb, past participle',
- 'vbp': 'Verb, non-3rd person singular present',
- 'vbz': 'Verb, 3rd person singular present',
- 'wdt': 'Wh-determiner',
- 'wp': 'Wh-pronoun',
- 'wp$': 'Possessive wh-pronoun',
- 'wrb': 'Wh-adverb',
- # others
- 'animal': 'Animal',
- 'body': 'Body part',
- 'body_plural': 'Body part, plural',
- 'food': 'Food',
- 'liquid': 'Liquid',
- }
-
-
-regex = "<.*?::(.*?)/>"
-
-parts = re.split(regex, state.story)
-
-outparts = []
-
-for i, part in enumerate(parts):
- if i % 2 == 1:
- # remove ':'
- part = part.strip(':')
- # use two-part key so that new stories get new text boxes
- answer = st.text_input(pos.get(part, part), key=(state.game_number, i))
-
- outparts.append(f"**{answer}**" if answer else "")
- else:
- outparts.append(part)
-
-if all(outparts) and st.button("generate madlib"):
- st.markdown("".join(outparts))
\ No newline at end of file
diff --git a/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli/app.py b/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli/app.py
deleted file mode 100644
index a18a062169db4d4150d42560a2a9622ef65585b0..0000000000000000000000000000000000000000
--- a/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# original:
-import gradio as gr
-gr.Interface.load("models/sileod/deberta-v3-base-tasksource-nli").launch()
diff --git a/spaces/ayaanzaveri/detr/app.py b/spaces/ayaanzaveri/detr/app.py
deleted file mode 100644
index 93fc176d4133a99872bc28745bcacc39ccf24b14..0000000000000000000000000000000000000000
--- a/spaces/ayaanzaveri/detr/app.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from transformers import AutoFeatureExtractor, AutoModelForObjectDetection
-import matplotlib.pyplot as plt
-import matplotlib.patches as patches
-from random import choice
-from PIL import Image
-import os
-from matplotlib import rcParams, font_manager
-import streamlit as st
-import urllib.request
-import requests
-
-extractor = AutoFeatureExtractor.from_pretrained("facebook/detr-resnet-50")
-
-model = AutoModelForObjectDetection.from_pretrained("facebook/detr-resnet-50")
-
-from transformers import pipeline
-
-pipe = pipeline('object-detection', model=model, feature_extractor=extractor)
-
-img_url = st.text_input('Image URL', 'https://images.unsplash.com/photo-1556911220-bff31c812dba?ixlib=rb-4.0.3&ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&auto=format&fit=crop&w=2468&q=80')
-
-st.caption('Downloading Image...')
-
-img_data = requests.get(img_url).content
-with open('detect.jpg', 'wb') as handler:
- handler.write(img_data)
-
-st.caption('Running Detection...')
-
-output = pipe(img_url)
-
-st.caption('Adding Predictions to Image...')
-
-fpath = "Poppins-SemiBold.ttf"
-prop = font_manager.FontProperties(fname=fpath)
-
-img = Image.open('detect.jpg')
-plt.figure(dpi=2400)
-
-# Create figure and axes
-fig, ax = plt.subplots()
-
-# Display the image
-ax.imshow(img)
-
-colors = ["#ef4444", "#f97316", "#eab308", "#84cc16", "#06b6d4", "#6366f1"]
-
-# Create a Rectangle patch
-for prediction in output:
- selected_color = choice(colors)
- x, y, w, h = prediction['box']['xmin'], prediction['box']['ymin'], prediction['box']['xmax'] - prediction['box']['xmin'], prediction['box']['ymax'] - prediction['box']['ymin']
- rect = patches.FancyBboxPatch((x, y), w, h, linewidth=1.25, edgecolor=selected_color, facecolor='none', boxstyle="round,pad=-0.0040,rounding_size=10",)
- ax.add_patch(rect)
- plt.text(x, y-25, f"{prediction['label']}: {round(prediction['score']*100, 1)}%", fontsize=5, color=selected_color, fontproperties=prop)
-
-plt.axis('off')
-
-plt.savefig('detect-bbox.jpg', dpi=1200, bbox_inches='tight')
-
-image = Image.open('detect-bbox.jpg')
-
-st.image(image, caption='DETR Image')
-
-plt.show()
-
-st.caption('Done!')
\ No newline at end of file
diff --git a/spaces/ayushnoori/program-synthesis/strings.py b/spaces/ayushnoori/program-synthesis/strings.py
deleted file mode 100644
index 23de68fbfb15f8be8d38a888fd62b3d862c0b5b1..0000000000000000000000000000000000000000
--- a/spaces/ayushnoori/program-synthesis/strings.py
+++ /dev/null
@@ -1,112 +0,0 @@
-'''
-STRING OPERATORS
-This file contains Python classes that define the string operators for program synthesis.
-'''
-
-'''
-CLASS DEFINITIONS
-'''
-
-class StringVariable:
- '''
- Class to represent a string variable. Note that position is the position of the variable in the input.
- For example, if the input is ["a", "b", "c"] and the variable is the third element (i.e., "c"), then position = 2.
- '''
- def __init__(self, position):
- self.position = position # zero-indexed position of the variable in the arguments to program
- self.type = str # type of the variable
- self.weight = 1 # weight of the variable
-
- def evaluate(self, input = None):
-
- # check that input is not None
- if input is None:
- raise ValueError("Input is None.")
-
- # check that input is a list
- if type(input) != list:
- raise ValueError("Input is not a list.")
-
- # check that input is not empty
- if len(input) == 0:
- raise ValueError("Input is empty.")
-
- # check that position is valid
- if self.position >= len(input):
- raise ValueError(f"Position {self.position} is out of range for input of length {len(input)}.")
-
- return input[self.position]
-
- def str(self):
- return f"x{self.position}"
-
-class StringConstant:
- '''
- Class to represent a string constant.
- '''
- def __init__(self, value):
- self.value = value # value of the constant
- self.type = str # type of the constant
- self.weight = 1 # weight of the constant
-
- def evaluate(self, input = None):
- return self.value
-
- def str(self):
- return str(self.value)
-
-class Concatenate:
- '''
- Operator to concatenate two string values.
- '''
- def __init__(self):
- self.arity = 2 # number of arguments
- self.arg_types = [str, str] # argument types
- self.return_type = str # return type
- self.weight = 1 # weight
-
- def evaluate(self, x, y, input = None):
- return x + y
-
- def str(self, x, y):
- return f"Concat({x}, {y})"
-
-class Left:
- '''
- Operator to get left substring.
- '''
- def __init__(self):
- self.arity = 2 # number of arguments
- self.arg_types = [str, int] # argument types
- self.return_type = str # return type
- self.weight = 1 # weight
-
- def evaluate(self, x, y, input = None):
- return x[:y]
-
- def str(self, x, y):
- return f"Left({x}, {y})"
-
-class Right:
- '''
- Operator to get right substring.
- '''
- def __init__(self):
- self.arity = 2 # number of arguments
- self.arg_types = [str, int] # argument types
- self.return_type = str # return type
- self.weight = 1 # weight
-
- def evaluate(self, x, y, input = None):
- return x[(y * -1):]
-
- def str(self, x, y):
- return f"Right({x}, {y})"
-
-
-'''
-GLOBAL CONSTANTS
-'''
-
-# define operators
-string_operators = [Concatenate(), Left(), Right()]
\ No newline at end of file
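A hedged sketch (not in the original file) of how these operator classes compose during evaluation; the integer argument to Left would normally come from a companion arithmetic DSL, so a plain Python int stands in for it here:

```python
from strings import StringVariable, Concatenate, Left  # module name from the file path above

x0, x1 = StringVariable(0), StringVariable(1)
concat, left = Concatenate(), Left()

inputs = ["program", "synthesis"]
result = concat.evaluate(left.evaluate(x0.evaluate(inputs), 3), x1.evaluate(inputs))
print(result)                                          # "prosynthesis"
print(concat.str(left.str(x0.str(), "3"), x1.str()))   # "Concat(Left(x0, 3), x1)"
```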
diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/effects/ParallaxBarrierEffect.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/effects/ParallaxBarrierEffect.js
deleted file mode 100644
index 4bdfc7d887f9d3ba7592f8591a1056d4d107b92e..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/examples/js/effects/ParallaxBarrierEffect.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * @author mrdoob / http://mrdoob.com/
- * @author marklundin / http://mark-lundin.com/
- * @author alteredq / http://alteredqualia.com/
- */
-
-THREE.ParallaxBarrierEffect = function ( renderer ) {
-
- var _camera = new THREE.OrthographicCamera( - 1, 1, 1, - 1, 0, 1 );
-
- var _scene = new THREE.Scene();
-
- var _stereo = new THREE.StereoCamera();
-
- var _params = { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter, format: THREE.RGBAFormat };
-
- var _renderTargetL = new THREE.WebGLRenderTarget( 512, 512, _params );
- var _renderTargetR = new THREE.WebGLRenderTarget( 512, 512, _params );
-
- var _material = new THREE.ShaderMaterial( {
-
- uniforms: {
-
- "mapLeft": { value: _renderTargetL.texture },
- "mapRight": { value: _renderTargetR.texture }
-
- },
-
- vertexShader: [
-
- "varying vec2 vUv;",
-
- "void main() {",
-
- " vUv = vec2( uv.x, uv.y );",
- " gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );",
-
- "}"
-
- ].join( "\n" ),
-
- fragmentShader: [
-
- "uniform sampler2D mapLeft;",
- "uniform sampler2D mapRight;",
- "varying vec2 vUv;",
-
- "void main() {",
-
- " vec2 uv = vUv;",
-
- " if ( ( mod( gl_FragCoord.y, 2.0 ) ) > 1.00 ) {",
-
- " gl_FragColor = texture2D( mapLeft, uv );",
-
- " } else {",
-
- " gl_FragColor = texture2D( mapRight, uv );",
-
- " }",
-
- "}"
-
- ].join( "\n" )
-
- } );
-
- var mesh = new THREE.Mesh( new THREE.PlaneBufferGeometry( 2, 2 ), _material );
- _scene.add( mesh );
-
- this.setSize = function ( width, height ) {
-
- renderer.setSize( width, height );
-
- var pixelRatio = renderer.getPixelRatio();
-
- _renderTargetL.setSize( width * pixelRatio, height * pixelRatio );
- _renderTargetR.setSize( width * pixelRatio, height * pixelRatio );
-
- };
-
- this.render = function ( scene, camera ) {
-
- scene.updateMatrixWorld();
-
- if ( camera.parent === null ) camera.updateMatrixWorld();
-
- _stereo.update( camera );
-
- renderer.setRenderTarget( _renderTargetL );
- renderer.clear();
- renderer.render( scene, _stereo.cameraL );
-
- renderer.setRenderTarget( _renderTargetR );
- renderer.clear();
- renderer.render( scene, _stereo.cameraR );
-
- renderer.setRenderTarget( null );
- renderer.render( _scene, _camera );
-
- };
-
-};
diff --git a/spaces/bankholdup/stylegan_petbreeder/e4e/editings/latent_editor.py b/spaces/bankholdup/stylegan_petbreeder/e4e/editings/latent_editor.py
deleted file mode 100644
index 4bebca2f5c86f71b58fa1f30d24bfcb0da06d88f..0000000000000000000000000000000000000000
--- a/spaces/bankholdup/stylegan_petbreeder/e4e/editings/latent_editor.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import torch
-import sys
-sys.path.append(".")
-sys.path.append("..")
-from editings import ganspace, sefa
-from utils.common import tensor2im
-
-
-class LatentEditor(object):
- def __init__(self, stylegan_generator, is_cars=False):
- self.generator = stylegan_generator
- self.is_cars = is_cars # Since the cars StyleGAN output is 384x512, there is a need to crop the 512x512 output.
-
- def apply_ganspace(self, latent, ganspace_pca, edit_directions):
- edit_latents = ganspace.edit(latent, ganspace_pca, edit_directions)
- return self._latents_to_image(edit_latents)
-
- def apply_interfacegan(self, latent, direction, factor=1, factor_range=None):
- edit_latents = []
- if factor_range is not None: # Apply a range of editing factors. for example, (-5, 5)
- for f in range(*factor_range):
- edit_latent = latent + f * direction
- edit_latents.append(edit_latent)
- edit_latents = torch.cat(edit_latents)
- else:
- edit_latents = latent + factor * direction
- return self._latents_to_image(edit_latents)
-
- def apply_sefa(self, latent, indices=[2, 3, 4, 5], **kwargs):
- edit_latents = sefa.edit(self.generator, latent, indices, **kwargs)
- return self._latents_to_image(edit_latents)
-
- # Currently, in order to apply StyleFlow editings, one should run inference,
- # save the latent codes and load them from the official StyleFlow repository.
- # def apply_styleflow(self):
- # pass
-
- def _latents_to_image(self, latents):
- with torch.no_grad():
- images, _ = self.generator([latents], randomize_noise=False, input_is_latent=True)
- if self.is_cars:
- images = images[:, :, 64:448, :] # 512x512 -> 384x512
- horizontal_concat_image = torch.cat(list(images), 2)
- final_image = tensor2im(horizontal_concat_image)
- return final_image
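`apply_interfacegan` above is plain latent arithmetic followed by decoding; a hedged standalone sketch of its factor_range branch, with a (1, 18, 512) W+ latent shape assumed and the generator step omitted:

```python
import torch

latent = torch.randn(1, 18, 512)       # a W+ code (shape is an assumption here)
direction = torch.randn(1, 18, 512)    # e.g. a learned "age" or "smile" boundary
factor_range = (-5, 5)

edit_latents = torch.cat([latent + f * direction for f in range(*factor_range)])
print(edit_latents.shape)              # torch.Size([10, 18, 512]) -> 10 edited codes to decode
```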
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_ffhq_degradation_dataset.py b/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_ffhq_degradation_dataset.py
deleted file mode 100644
index fa56c03fb8e23df26aa6ed8442a86b3c676eec78..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/tests/test_ffhq_degradation_dataset.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import pytest
-import yaml
-
-from gfpgan.data.ffhq_degradation_dataset import FFHQDegradationDataset
-
-
-def test_ffhq_degradation_dataset():
-
- with open('tests/data/test_ffhq_degradation_dataset.yml', mode='r') as f:
- opt = yaml.load(f, Loader=yaml.FullLoader)
-
- dataset = FFHQDegradationDataset(opt)
- assert dataset.io_backend_opt['type'] == 'disk' # io backend
- assert len(dataset) == 1 # whether to read correct meta info
- assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization of the degradation configurations
- assert dataset.color_jitter_prob == 1
-
- # test __getitem__
- result = dataset.__getitem__(0)
- # check returned keys
- expected_keys = ['gt', 'lq', 'gt_path']
- assert set(expected_keys).issubset(set(result.keys()))
- # check shape and contents
- assert result['gt'].shape == (3, 512, 512)
- assert result['lq'].shape == (3, 512, 512)
- assert result['gt_path'] == 'tests/data/gt/00000000.png'
-
- # ------------------ test with probability = 0 -------------------- #
- opt['color_jitter_prob'] = 0
- opt['color_jitter_pt_prob'] = 0
- opt['gray_prob'] = 0
- opt['io_backend'] = dict(type='disk')
- dataset = FFHQDegradationDataset(opt)
- assert dataset.io_backend_opt['type'] == 'disk' # io backend
- assert len(dataset) == 1 # whether to read correct meta info
- assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization of the degradation configurations
- assert dataset.color_jitter_prob == 0
-
- # test __getitem__
- result = dataset.__getitem__(0)
- # check returned keys
- expected_keys = ['gt', 'lq', 'gt_path']
- assert set(expected_keys).issubset(set(result.keys()))
- # check shape and contents
- assert result['gt'].shape == (3, 512, 512)
- assert result['lq'].shape == (3, 512, 512)
- assert result['gt_path'] == 'tests/data/gt/00000000.png'
-
- # ------------------ test lmdb backend -------------------- #
- opt['dataroot_gt'] = 'tests/data/ffhq_gt.lmdb'
- opt['io_backend'] = dict(type='lmdb')
-
- dataset = FFHQDegradationDataset(opt)
- assert dataset.io_backend_opt['type'] == 'lmdb' # io backend
- assert len(dataset) == 1 # whether to read correct meta info
- assert dataset.kernel_list == ['iso', 'aniso'] # correct initialization of the degradation configurations
- assert dataset.color_jitter_prob == 0
-
- # test __getitem__
- result = dataset.__getitem__(0)
- # check returned keys
- expected_keys = ['gt', 'lq', 'gt_path']
- assert set(expected_keys).issubset(set(result.keys()))
- # check shape and contents
- assert result['gt'].shape == (3, 512, 512)
- assert result['lq'].shape == (3, 512, 512)
- assert result['gt_path'] == '00000000'
-
- # ------------------ test with crop_components -------------------- #
- opt['crop_components'] = True
- opt['component_path'] = 'tests/data/test_eye_mouth_landmarks.pth'
- opt['eye_enlarge_ratio'] = 1.4
- opt['gt_gray'] = True
- opt['io_backend'] = dict(type='lmdb')
-
- dataset = FFHQDegradationDataset(opt)
- assert dataset.crop_components is True
-
- # test __getitem__
- result = dataset.__getitem__(0)
- # check returned keys
- expected_keys = ['gt', 'lq', 'gt_path', 'loc_left_eye', 'loc_right_eye', 'loc_mouth']
- assert set(expected_keys).issubset(set(result.keys()))
- # check shape and contents
- assert result['gt'].shape == (3, 512, 512)
- assert result['lq'].shape == (3, 512, 512)
- assert result['gt_path'] == '00000000'
- assert result['loc_left_eye'].shape == (4, )
- assert result['loc_right_eye'].shape == (4, )
- assert result['loc_mouth'].shape == (4, )
-
- # ------------------ lmdb backend should have paths ends with lmdb -------------------- #
- with pytest.raises(ValueError):
- opt['dataroot_gt'] = 'tests/data/gt'
- opt['io_backend'] = dict(type='lmdb')
- dataset = FFHQDegradationDataset(opt)
diff --git a/spaces/bigPear/digitalWDF/examples/quantized_finetune_with_local_model.sh b/spaces/bigPear/digitalWDF/examples/quantized_finetune_with_local_model.sh
deleted file mode 100644
index 209e77797ee4048996704589956f922909ec51b4..0000000000000000000000000000000000000000
--- a/spaces/bigPear/digitalWDF/examples/quantized_finetune_with_local_model.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-CUDA_VISIBLE_DEVICES=0 python ../src/finetune.py \
- --do_train \
- --model_name_or_path path_to_chatglm_model \
- --dataset alpaca_gpt4_zh \
- --dataset_dir ../data \
- --finetuning_type lora \
- --output_dir path_to_sft_checkpoint \
- --overwrite_cache \
- --per_device_train_batch_size 4 \
- --gradient_accumulation_steps 4 \
- --lr_scheduler_type cosine \
- --logging_steps 10 \
- --save_steps 1000 \
- --learning_rate 5e-5 \
- --num_train_epochs 3.0 \
- --quantization_bit 8 \
- --fp16
diff --git a/spaces/bioriAsaeru/text-to-voice/Anaganaga O Dheerudu How a 9-Year-Old Girl Saved Her Land from an Evil Queen.md b/spaces/bioriAsaeru/text-to-voice/Anaganaga O Dheerudu How a 9-Year-Old Girl Saved Her Land from an Evil Queen.md
deleted file mode 100644
index 9ed91c9b5610fb3e2dea458b971e9dfcf858d909..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Anaganaga O Dheerudu How a 9-Year-Old Girl Saved Her Land from an Evil Queen.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
Best biggest database of FREE PORN XXX movies. Start watching FREE HIGH QUALITY HD videos right now. You can watch anaganaga o dheerudu movierulz clip on your favorites from web, iPhone, Android, iPad and other your mobile phones.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/Formatter-2.9.0.4..epub Everything You Need to Know about E-book Formats and Standards.md b/spaces/bioriAsaeru/text-to-voice/Formatter-2.9.0.4..epub Everything You Need to Know about E-book Formats and Standards.md
deleted file mode 100644
index 634801426db2746b6ca6ca8330499cd8853586cf..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Formatter-2.9.0.4..epub Everything You Need to Know about E-book Formats and Standards.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/blmdsydm/faster-whisper-webui/app-network.py b/spaces/blmdsydm/faster-whisper-webui/app-network.py
deleted file mode 100644
index 4f0e565b9029761d4b995fe32a65c58d1de55f53..0000000000000000000000000000000000000000
--- a/spaces/blmdsydm/faster-whisper-webui/app-network.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Run the app with no audio file restrictions, and make it available on the network
-from app import create_ui
-from src.config import ApplicationConfig
-
-create_ui(ApplicationConfig.create_default(input_audio_max_duration=-1, server_name="0.0.0.0"))
\ No newline at end of file
diff --git a/spaces/bluelu/Product-Photo-Analyzer/process.py b/spaces/bluelu/Product-Photo-Analyzer/process.py
deleted file mode 100644
index ff21f44e4ae27a6ac18385335497c6bace72fbab..0000000000000000000000000000000000000000
--- a/spaces/bluelu/Product-Photo-Analyzer/process.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import csv
-import os
-from huggingface_hub import Repository
-from datetime import datetime
-from torchvision.utils import save_image
-from datasets import load_dataset, list_datasets
-DATASET_REPO_URL = "https://huggingface.co/datasets/bluelu/bgi"
-DATA_FILENAME = "bg.csv"
-DATA_FILE = os.path.join("data", DATA_FILENAME)
-IN1 = os.path.join("data", str(datetime.now().replace(microsecond=0)) + '_0.png')
-IN2 = os.path.join("data", str(datetime.now().replace(microsecond=0)) + '_1.png')
-IN3 = os.path.join("data", str(datetime.now().replace(microsecond=0)) + '_2.png')
-HF_TOKEN = os.environ.get("S2")
-
-
-def fiximg(imgs, mode):
- print(list_datasets())
- repo = Repository(
- local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN)
-
-
- save_image(imgs[0], IN1)
- save_image(imgs[1], IN2)
- save_image(imgs[2], IN3)
- if mode is None:
- mode = 'default'
-
- with open(DATA_FILE, 'a') as file:
- writer = csv.DictWriter(file, fieldnames=["ime"])
- writer.writerow({"ime": str(datetime.now()) + mode})
-
- repo.push_to_hub()
\ No newline at end of file
diff --git a/spaces/bohmian/stock_intrinsic_value_calculator/app.py b/spaces/bohmian/stock_intrinsic_value_calculator/app.py
deleted file mode 100644
index 58fc8c94334ee96e091758a170c2a72b7fdae63c..0000000000000000000000000000000000000000
--- a/spaces/bohmian/stock_intrinsic_value_calculator/app.py
+++ /dev/null
@@ -1,358 +0,0 @@
-# Importing required modules
-import pandas as pd # for manipulating financial statements in dataframes
-import numpy as np
-import plotly.express as px # for visualizing results in interactive plots
-
-# To extract and parse fundamental data like beta and growth estimates from finviz website's HTML
-import requests
-from bs4 import BeautifulSoup as bs
-
-# For parsing financial statements data from financialmodelingprep api
-from urllib.request import urlopen
-import json
-
-def get_jsonparsed_data(url):
- response = urlopen(url)
- data = response.read().decode("utf-8")
- return json.loads(data)
-
-# For Gradio App
-import gradio as gr
-
-
-# To read the environment variable to use in API requests later
-import os
-apiKey = os.environ['FMP_API_KEY'] # the environment variable is set in HuggingFace Spaces directly
-
-
-############################################################################################################
-###### GET DATA FROM FINANCIAL MODELING PREP
-############################################################################################################
-
-# Financialmodelingprep api url
-base_url = "https://financialmodelingprep.com/api/v3/"
-
-# get financial statements using financial modelling prep API
-def get_financial_statements(ticker):
- # quarterly cash flow statements for calculating latest trailing twelve months (TTM) free cash flow
- columns_drop = ['acceptedDate', 'period', 'symbol', 'reportedCurrency', 'cik', 'fillingDate', 'depreciationAndAmortization', 'link', 'finalLink']
- q_cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
- q_cash_flow_statement = q_cash_flow_statement.set_index('date').drop(columns_drop, axis=1).iloc[:4] # extract for last 4 quarters
- latest_year = int(q_cash_flow_statement.iloc[0]['calendarYear'])
-
- # annual cash flow statements
- cash_flow_statement = pd.DataFrame(get_jsonparsed_data(base_url+'cash-flow-statement/' + ticker + '?apikey=' + apiKey))
- cash_flow_statement = cash_flow_statement.set_index('date').drop(columns_drop, axis=1)
-
- # combine annual and latest TTM cash flow statements
- ttm_cash_flow_statement = q_cash_flow_statement.sum() # sum up last 4 quarters to get TTM cash flow
- cash_flow_statement = cash_flow_statement[::-1].append(ttm_cash_flow_statement.rename('TTM')).drop(['netIncome'], axis=1)
- final_cash_flow_statement = cash_flow_statement[::-1] # reverse list to show most recent ones first
-
- # quarterly balance sheet statements
- columns_drop = ['acceptedDate', 'calendarYear', 'period', 'symbol', 'reportedCurrency', 'cik', 'fillingDate', 'link', 'finalLink']
- q_balance_statement = pd.DataFrame(get_jsonparsed_data(base_url+'balance-sheet-statement/' + ticker + '?period=quarter' + '&apikey=' + apiKey))
- q_balance_statement = q_balance_statement.set_index('date').drop(columns_drop, axis=1)
- q_balance_statement = q_balance_statement.apply(pd.to_numeric, errors='coerce')
-
- return q_cash_flow_statement, cash_flow_statement, final_cash_flow_statement, q_balance_statement, latest_year
-
-
-# check stability of cash flows
-def plot_cash_flow(ticker, cash_flow_statement):
- # DCF model works best only if the free cash flows are POSITIVE, STABLE and STEADILY INCREASING.
- # So let's plot the graph and verify if this is the case.
- fig_cash_flow = px.bar(cash_flow_statement , y='freeCashFlow', title=ticker + ' Free Cash Flows')
- fig_cash_flow.update_xaxes(type='category', tickangle=270, title='Date')
- fig_cash_flow.update_yaxes(title='Free Cash Flows')
- #fig_cash_flow.show()
- return fig_cash_flow
-
-
-# get ttm cash flow, most recent total debt and cash & short term investment data from statements
-def get_statements_data(final_cash_flow_statement, q_balance_statement):
- cash_flow = final_cash_flow_statement.iloc[0]['freeCashFlow'] # ttm cash flow
- total_debt = q_balance_statement.iloc[0]['totalDebt']
- cash_and_ST_investments = q_balance_statement.iloc[0]['cashAndShortTermInvestments']
- return cash_flow, total_debt, cash_and_ST_investments
-
-
-############################################################################################################
-###### GET DATA FROM FINVIZ WEBSITE
-############################################################################################################
-
-# Price, EPS next Y/5Y, Beta, Number of Shares Outstanding
-# Extract (using requests.get) and Parse (using Beautiful Soup) data from Finviz table in the Finviz website (see screenshot above), needed to calculate intrinsic value of stock.
-
-# List of data we want to extract from Finviz Table
-# Price is the current stock price
-# EPS next Y is the estimated earnings growth for next year
-# EPS next 5Y is the estimated earnings growth for next 5 years (if this is not present on finviz, we will use EPS next Y instead)
-# Beta captures the volatility of the stock, used for estimating discount rate later
-# Shs Outstand is the number of shares present in the market
-metric = ['Price', 'EPS next Y', 'EPS next 5Y', 'Beta', 'Shs Outstand']
-
-def fundamental_metric(soup, metric):
- # the table which stores the data in Finviz has html table attribute class of 'snapshot-td2'
- return soup.find_all(text = metric)[-1].find_next(class_='snapshot-td2').text
-
-# get above metrics from finviz and store as a dict
-def get_finviz_data(ticker):
- try:
- url = ("http://finviz.com/quote.ashx?t=" + ticker.lower())
- soup = bs(requests.get(url,headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0'}).content)
- dict_finviz = {}
- for m in metric:
- dict_finviz[m] = fundamental_metric(soup,m)
- for key, value in dict_finviz.items():
- # replace percentages
- if (value[-1]=='%'):
- dict_finviz[key] = value[:-1]
- dict_finviz[key] = float(dict_finviz[key])
- # billion
- if (value[-1]=='B'):
- dict_finviz[key] = value[:-1]
- dict_finviz[key] = float(dict_finviz[key])*1000000000
- # million
- if (value[-1]=='M'):
- dict_finviz[key] = value[:-1]
- dict_finviz[key] = float(dict_finviz[key])*1000000
- try:
- dict_finviz[key] = float(dict_finviz[key])
- except:
- pass
- except Exception as e:
- print (e)
- print ('Not successful parsing ' + ticker + ' data.')
- return dict_finviz
-
-
-def parse_finviz_dict(finviz_dict):
- EPS_growth_5Y = finviz_dict['EPS next 5Y']
- # sometimes EPS next 5Y is empty and shows as a '-' string, in this case use EPS next Y
- if isinstance(EPS_growth_5Y, str):
- if not EPS_growth_5Y.isdigit():
- EPS_growth_5Y = finviz_dict['EPS next Y']
- EPS_growth_6Y_to_10Y = EPS_growth_5Y/2 # Half the previous growth rate, conservative estimate
- # Long term = previous growth rate or around long term inflation rate, whichever is lower to be conservative estimate
- long_term_growth_rate = np.minimum(EPS_growth_6Y_to_10Y, 3)
- shares_outstanding = finviz_dict['Shs Outstand']
- beta = finviz_dict['Beta']
- current_price = finviz_dict['Price']
-
- return EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, beta, shares_outstanding, current_price
-
-
-## Estimate Discount Rate from Beta
-def estimate_discount_rate(beta):
- # Beta shows the volatility of the stock,
- # the higher the beta, we want to be more conservative by increasing the discount rate also.
- discount_rate = 7
- if(beta<0.80):
- discount_rate = 5
- elif(beta>=0.80 and beta<1):
- discount_rate = 6
- elif(beta>=1 and beta<1.1):
- discount_rate = 6.5
- elif(beta>=1.1 and beta<1.2):
- discount_rate = 7
- elif(beta>=1.2 and beta<1.3):
- discount_rate = 7.5
- elif(beta>=1.3 and beta<1.4):
- discount_rate = 8
- elif(beta>=1.4 and beta<1.6):
- discount_rate = 8.5
- elif(beta>=1.61):
- discount_rate = 9
-
- return discount_rate
-
-
-############################################################################################################
-## Calculate Intrinsic Value
-############################################################################################################
-
-# 1. First Project Cash Flows from Year 1 to Year 10 using Present (TTM) Free Cash Flow
-# 2. Discount the Cash Flows to Present Value
-# 3. Calculate the Terminal Value after Year 10 (Discounted to Present Value) Assuming the Company will Grow at a Constant Steady Rate Forever (https://corporatefinanceinstitute.com/resources/financial-modeling/dcf-terminal-value-formula/)
-# 4. Add the Cash Flows and the Terminal Value Up
-# 5. Then Account for the Cash + Short Term Investments and Subtract Total Debt
-# 6. Divide by Total Number of Shares Outstanding
-
-def calculate_intrinsic_value(latest_year, cash_flow, total_debt, cash_and_ST_investments,
- EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate,
- shares_outstanding, discount_rate, current_price):
-
- # Convert all percentages to decimals
- EPS_growth_5Y_d = EPS_growth_5Y/100
- EPS_growth_6Y_to_10Y_d = EPS_growth_6Y_to_10Y/100
- long_term_growth_rate_d = long_term_growth_rate/100
- discount_rate_d = discount_rate/100
- # print("Discounted Cash Flows\n")
-
- # Lists of projected cash flows from year 1 to year 20
- cash_flow_list = []
- cash_flow_discounted_list = []
- year_list = []
-
- # 1. First Project Cash Flows from Year 1 to Year 10 using Present (TTM) Free Cash Flow
- # 2. Discount Each of the Cash Flows to Present Value
- # Years 1 to 5
- for year in range(1, 6):
- year_list.append(year + latest_year)
- cash_flow*=(1 + EPS_growth_5Y_d)
- cash_flow_list.append(cash_flow)
- cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year)
- cash_flow_discounted_list.append(cash_flow_discounted)
- # print("Year " + str(year + latest_year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows
-
- # Years 6 to 10
- for year in range(6, 11):
- year_list.append(year + latest_year)
- cash_flow*=(1 + EPS_growth_6Y_to_10Y_d)
- cash_flow_list.append(cash_flow)
- cash_flow_discounted = cash_flow/((1 + discount_rate_d)**year)
- cash_flow_discounted_list.append(cash_flow_discounted)
- # print("Year " + str(year + latest_year) + ": $" + str(cash_flow_discounted)) ## Print out the projected discounted cash flows
-
- # Store all forecasted cash flows in dataframe
- forecast_cash_flows_df = pd.DataFrame.from_dict({'Year': year_list, 'Cash Flow': cash_flow_list, 'Discounted Cash Flow': cash_flow_discounted_list})
- forecast_cash_flows_df = forecast_cash_flows_df.set_index('Year')
-
- # 3. Calculate the Terminal Value after Year 10 (Discounted to Present Value)
- # These are All Future Cash Flows Summed Up
- # Assuming the Company will Grow at a Constant Steady Rate Forever (https://corporatefinanceinstitute.com/resources/financial-modeling/dcf-terminal-value-formula/)
- # Growth in Perpetuity Approach
- cashflow_10Y = cash_flow_discounted_list[-1]
- # Formula to Calculate: https://corporatefinanceinstitute.com/resources/financial-modeling/dcf-terminal-value-formula/
- terminal_value = cashflow_10Y*(1+long_term_growth_rate_d)/(discount_rate_d-long_term_growth_rate_d)
-
- # 4. Add the Cash Flows and the Terminal Value Up
- # 5. Then Account for the Cash + Short Term Investments and Subtract Total Debt
- # 6. Divide by Total Number of Shares Outstanding
- intrinsic_value = (sum(cash_flow_discounted_list) + terminal_value - total_debt + cash_and_ST_investments)/shares_outstanding
- margin_of_safety = (1-current_price/intrinsic_value)*100
-
- return forecast_cash_flows_df, terminal_value, intrinsic_value, margin_of_safety
-
-
-# Plot forecasted cash flows from years 1 to 10, as well as the discounted cash flows
-def plot_forecasted_cash_flows(ticker, forecast_cash_flows_df):
-
- fig_cash_forecast = px.bar(forecast_cash_flows_df, barmode='group', title=ticker + ' Projected Free Cash Flows')
- fig_cash_forecast.update_xaxes(type='category', tickangle=270)
- fig_cash_forecast.update_xaxes(tickangle=270, title='Forecasted Year')
- fig_cash_forecast.update_yaxes(title='Free Cash Flows')
- # fig_cash_forecast.show()
-
- return fig_cash_forecast
-
-
-# chain all the steps from the functions above together
-def run_all_steps(ticker):
- ticker = ticker.upper() # make sure ticker is caps
-
- q_cash_flow_statement, cash_flow_statement, final_cash_flow_statement, q_balance_statement, latest_year = get_financial_statements(ticker)
-
- fig_cash_flow = plot_cash_flow(ticker, cash_flow_statement)
-
- cash_flow, total_debt, cash_and_ST_investments = get_statements_data(final_cash_flow_statement, q_balance_statement)
-
- finviz_dict = get_finviz_data(ticker)
-
- EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, beta, shares_outstanding, current_price = parse_finviz_dict(finviz_dict)
-
- discount_rate = estimate_discount_rate(beta)
-
- forecast_cash_flows_df, terminal_value, intrinsic_value, margin_of_safety = calculate_intrinsic_value(latest_year, cash_flow, total_debt, cash_and_ST_investments,
- EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate,
- shares_outstanding, discount_rate, current_price)
-
- fig_cash_forecast = plot_forecasted_cash_flows(ticker, forecast_cash_flows_df)
-
- return q_cash_flow_statement.reset_index(), final_cash_flow_statement.reset_index(), q_balance_statement.reset_index(), fig_cash_flow, \
- str(EPS_growth_5Y) + '%', str(EPS_growth_6Y_to_10Y) + '%', str(long_term_growth_rate) + '%', \
- beta, shares_outstanding, current_price, \
- str(discount_rate) + '%', forecast_cash_flows_df.reset_index(), terminal_value, intrinsic_value, fig_cash_forecast, str(margin_of_safety) + '%'
-
-
-# Gradio App and UI
-with gr.Blocks() as app:
- with gr.Row():
- gr.HTML("
Bohmian's Stock Intrinsic Value Calculator
")
-
- with gr.Row():
- ticker = gr.Textbox("AAPL", label='Enter stock ticker to calculate its intrinsic value e.g. "AAPL"')
- btn = gr.Button("Calculate Intrinsic Value")
-
- # Show intrinsic value calculation results
- with gr.Row():
- gr.HTML("
Calculated Intrinsic Value
")
-
- with gr.Row():
- intrinsic_value = gr.Text(label="Intrinsic Value (if this value is negative, it means current cash flow may be negative and this model WOULD NOT WORK, scroll down to check)")
- current_price = gr.Text(label="Actual Stock Price")
- margin_of_safety = gr.Text(label="Margin of Safety")
-
- # Show metrics obtained and estimated from FinViz website that were essential for calculations
- with gr.Row():
- gr.HTML("
Metrics Obtained (and Estimated) from FinViz Website
")
- with gr.Row():
- gr.HTML("
https://finviz.com/
")
-
- with gr.Row():
- EPS_growth_5Y = gr.Text(label="EPS Next 5Y (estimated EPS growth for next 5 years)")
- EPS_growth_6Y_to_10Y = gr.Text(label="EPS growth for 6th to 10th year (estimated as half of 5Y rate)")
- long_term_growth_rate = gr.Text(label="Long Term Growth Rate (estimated as the above or 3%, whichever is lower)")
-
- with gr.Row():
- beta = gr.Text(label="Beta (measures volatility of stock)")
- discount_rate = gr.Text(label="Discount Rate (estimated from beta)")
- shares_outstanding = gr.Text(label="Shares Outstanding")
-
-
- # Show detailed actual historical financial statements
- with gr.Row():
- gr.HTML("
Actual Historical Financial Statements Data from Financial Modelling Prep API
")
- with gr.Row():
- gr.HTML("
https://site.financialmodelingprep.com/developer
")
- with gr.Row():
- gr.HTML("
IMPORTANT NOTE: DCF model works best only if the free cash flows are POSITIVE, STABLE and STEADILY INCREASING. Check if this is the case.
")
-
- with gr.Row():
- fig_cash_flow = gr.Plot(label="Historical Cash Flows")
-
- with gr.Row():
- q_cash_flow_statement = gr.DataFrame(label="Last 4 Quarterly Cash Flow Statements")
-
- with gr.Row():
- final_cash_flow_statement = gr.DataFrame(label="TTM + Annual Cash Flow Statements")
-
- with gr.Row():
- q_balance_statement = gr.DataFrame(label="Quarterly Balance Statements")
-
-
- # Show forecasted cash flows and terminal value
- with gr.Row():
- gr.HTML("
Forecasted Cash Flows for Next 10 Years
")
-
- with gr.Row():
- fig_cash_forecast = gr.Plot(label="Forecasted Cash Flows")
-
- with gr.Row():
- forecast_cash_flows_df = gr.DataFrame(label="Forecasted Cash Flows")
-
- with gr.Row():
- terminal_value = gr.Text(label="Terminal Value (after 10th year)")
-
- btn.click(fn=run_all_steps, inputs=[ticker],
- outputs=[q_cash_flow_statement, final_cash_flow_statement, q_balance_statement, fig_cash_flow, \
- EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, beta, shares_outstanding, current_price, \
- discount_rate, forecast_cash_flows_df, terminal_value, intrinsic_value, fig_cash_forecast, margin_of_safety])
-
- ticker.submit(fn=run_all_steps, inputs=[ticker],
- outputs=[q_cash_flow_statement, final_cash_flow_statement, q_balance_statement, fig_cash_flow, \
- EPS_growth_5Y, EPS_growth_6Y_to_10Y, long_term_growth_rate, beta, shares_outstanding, current_price, \
- discount_rate, forecast_cash_flows_df, terminal_value, intrinsic_value, fig_cash_forecast, margin_of_safety])
-
-app.launch()
\ No newline at end of file
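
For orientation, the growth and discounting rules spelled out by the UI labels above (6th-to-10th-year growth estimated as half the 5-year rate, a long-term rate capped at 3%, a beta-derived discount rate) reduce to a small amount of arithmetic. The sketch below is a self-contained illustration with made-up inputs, not the Space's own `calculate_intrinsic_value`:

```python
def intrinsic_value_per_share(free_cash_flow, total_debt, cash_and_st_investments,
                              shares_outstanding, eps_growth_5y, discount_rate,
                              long_term_growth_cap=0.03):
    """Toy 10-year DCF following the rules described by the UI labels (illustrative only)."""
    growth_6_to_10 = eps_growth_5y / 2
    long_term_growth = min(growth_6_to_10, long_term_growth_cap)

    # Project 10 years of free cash flow and discount each year back to today.
    discounted, cf = [], free_cash_flow
    for year in range(1, 11):
        rate = eps_growth_5y if year <= 5 else growth_6_to_10
        cf *= 1 + rate
        discounted.append(cf / (1 + discount_rate) ** year)

    # Gordon-growth terminal value after year 10, also discounted to today.
    terminal = cf * (1 + long_term_growth) / (discount_rate - long_term_growth)
    terminal_discounted = terminal / (1 + discount_rate) ** 10

    equity_value = sum(discounted) + terminal_discounted + cash_and_st_investments - total_debt
    return equity_value / shares_outstanding

# Hypothetical inputs, purely for illustration:
print(round(intrinsic_value_per_share(100e9, 120e9, 60e9, 16e9, 0.08, 0.09), 2))
```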
diff --git a/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h b/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h
deleted file mode 100644
index b5c144a4a58649906c9c87a40044b5118a00aa04..0000000000000000000000000000000000000000
--- a/spaces/bradarrML/stablediffusion-infinity/PyPatchMatch/csrc/nnf.h
+++ /dev/null
@@ -1,133 +0,0 @@
-#pragma once
-
-#include <opencv2/core.hpp>
-#include "masked_image.h"
-
-class PatchDistanceMetric {
-public:
- PatchDistanceMetric(int patch_size) : m_patch_size(patch_size) {}
- virtual ~PatchDistanceMetric() = default;
-
- inline int patch_size() const { return m_patch_size; }
- virtual int operator()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const = 0;
- static const int kDistanceScale;
-
-protected:
- int m_patch_size;
-};
-
-class NearestNeighborField {
-public:
- NearestNeighborField() : m_source(), m_target(), m_field(), m_distance_metric(nullptr) {
- // pass
- }
- NearestNeighborField(const MaskedImage &source, const MaskedImage &target, const PatchDistanceMetric *metric, int max_retry = 20)
- : m_source(source), m_target(target), m_distance_metric(metric) {
- m_field = cv::Mat(m_source.size(), CV_32SC3);
- _randomize_field(max_retry);
- }
- NearestNeighborField(const MaskedImage &source, const MaskedImage &target, const PatchDistanceMetric *metric, const NearestNeighborField &other, int max_retry = 20)
- : m_source(source), m_target(target), m_distance_metric(metric) {
- m_field = cv::Mat(m_source.size(), CV_32SC3);
- _initialize_field_from(other, max_retry);
- }
-
- const MaskedImage &source() const {
- return m_source;
- }
- const MaskedImage &target() const {
- return m_target;
- }
- inline cv::Size source_size() const {
- return m_source.size();
- }
- inline cv::Size target_size() const {
- return m_target.size();
- }
- inline void set_source(const MaskedImage &source) {
- m_source = source;
- }
- inline void set_target(const MaskedImage &target) {
- m_target = target;
- }
-
- inline int *mutable_ptr(int y, int x) {
- return m_field.ptr<int>(y, x);
- }
- inline const int *ptr(int y, int x) const {
- return m_field.ptr<int>(y, x);
- }
-
- inline int at(int y, int x, int c) const {
- return m_field.ptr<int>(y, x)[c];
- }
- inline int &at(int y, int x, int c) {
- return m_field.ptr<int>(y, x)[c];
- }
- inline void set_identity(int y, int x) {
- auto ptr = mutable_ptr(y, x);
- ptr[0] = y, ptr[1] = x, ptr[2] = 0;
- }
-
- void minimize(int nr_pass);
-
-private:
- inline int _distance(int source_y, int source_x, int target_y, int target_x) {
- return (*m_distance_metric)(m_source, source_y, source_x, m_target, target_y, target_x);
- }
-
- void _randomize_field(int max_retry = 20, bool reset = true);
- void _initialize_field_from(const NearestNeighborField &other, int max_retry);
- void _minimize_link(int y, int x, int direction);
-
- MaskedImage m_source;
- MaskedImage m_target;
- cv::Mat m_field; // { y_target, x_target, distance_scaled }
- const PatchDistanceMetric *m_distance_metric;
-};
-
-
-class PatchSSDDistanceMetric : public PatchDistanceMetric {
-public:
- using PatchDistanceMetric::PatchDistanceMetric;
- virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const;
- static const int kSSDScale;
-};
-
-class DebugPatchSSDDistanceMetric : public PatchDistanceMetric {
-public:
- DebugPatchSSDDistanceMetric(int patch_size, int width, int height) : PatchDistanceMetric(patch_size), m_width(width), m_height(height) {}
- virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const;
-protected:
- int m_width, m_height;
-};
-
-class RegularityGuidedPatchDistanceMetricV1 : public PatchDistanceMetric {
-public:
- RegularityGuidedPatchDistanceMetricV1(int patch_size, double dx1, double dy1, double dx2, double dy2, double weight)
- : PatchDistanceMetric(patch_size), m_dx1(dx1), m_dy1(dy1), m_dx2(dx2), m_dy2(dy2), m_weight(weight) {
-
- assert(m_dy1 == 0);
- assert(m_dx2 == 0);
- m_scale = sqrt(m_dx1 * m_dx1 + m_dy2 * m_dy2) / 4;
- }
- virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const;
-
-protected:
- double m_dx1, m_dy1, m_dx2, m_dy2;
- double m_scale, m_weight;
-};
-
-class RegularityGuidedPatchDistanceMetricV2 : public PatchDistanceMetric {
-public:
- RegularityGuidedPatchDistanceMetricV2(int patch_size, cv::Mat ijmap, double weight)
- : PatchDistanceMetric(patch_size), m_ijmap(ijmap), m_weight(weight) {
-
- }
- virtual int operator ()(const MaskedImage &source, int source_y, int source_x, const MaskedImage &target, int target_y, int target_x) const;
-
-protected:
- cv::Mat m_ijmap;
- double m_width, m_height, m_weight;
-};
-
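
The header above stores the nearest-neighbour field as a 3-channel integer matrix per source pixel ({y_target, x_target, distance_scaled}) that is first randomized and then iteratively minimized. A loose Python sketch of the random-initialization idea, with a hypothetical `patch_distance` callback standing in for `PatchDistanceMetric` (this is not a transcription of `_randomize_field`):

```python
import random

def randomize_field(height, width, patch_distance, max_retry=20):
    """Assign each source pixel the best of `max_retry` random target candidates."""
    field = [[None] * width for _ in range(height)]
    for y in range(height):
        for x in range(width):
            best = None
            for _ in range(max_retry):
                ty, tx = random.randrange(height), random.randrange(width)
                d = patch_distance(y, x, ty, tx)  # hypothetical metric callback
                if best is None or d < best[2]:
                    best = (ty, tx, d)
            field[y][x] = best  # (y_target, x_target, distance_scaled)
    return field
```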
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py b/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py
deleted file mode 100644
index 78ce7c6cb02fa01f6319d088349ff4f422001839..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/projects/DensePose/densepose/modeling/losses/chart_with_confidences.py
+++ /dev/null
@@ -1,209 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import math
-from typing import Any, List
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-from detectron2.config import CfgNode
-from detectron2.structures import Instances
-
-from .. import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
-from .chart import DensePoseChartLoss
-from .registry import DENSEPOSE_LOSS_REGISTRY
-from .utils import BilinearInterpolationHelper, LossDict
-
-
-@DENSEPOSE_LOSS_REGISTRY.register()
-class DensePoseChartWithConfidenceLoss(DensePoseChartLoss):
- """ """
-
- def __init__(self, cfg: CfgNode):
- super().__init__(cfg)
- self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
- if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
- self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss(
- self.confidence_model_cfg.uv_confidence.epsilon
- )
- elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO:
- self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss(
- self.confidence_model_cfg.uv_confidence.epsilon
- )
-
- def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict:
- """
- Overrides fake losses for fine segmentation and U/V coordinates to
- include computation graphs for additional confidence parameters.
- These are used when no suitable ground truth data was found in a batch.
- The loss has a value 0 and is primarily used to construct the computation graph,
- so that `DistributedDataParallel` has similar graphs on all GPUs and can
- perform reduction properly.
-
- Args:
- densepose_predictor_outputs: DensePose predictor outputs, an object
- of a dataclass that is assumed to have the following attributes:
- * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
- * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
- * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
- Return:
- dict: str -> tensor: dict of losses with the following entries:
- * `loss_densepose_U`: has value 0
- * `loss_densepose_V`: has value 0
- * `loss_densepose_I`: has value 0
- """
- conf_type = self.confidence_model_cfg.uv_confidence.type
- if self.confidence_model_cfg.uv_confidence.enabled:
- loss_uv = (
- densepose_predictor_outputs.u.sum() + densepose_predictor_outputs.v.sum()
- ) * 0
- if conf_type == DensePoseUVConfidenceType.IID_ISO:
- loss_uv += densepose_predictor_outputs.sigma_2.sum() * 0
- elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO:
- loss_uv += (
- densepose_predictor_outputs.sigma_2.sum()
- + densepose_predictor_outputs.kappa_u.sum()
- + densepose_predictor_outputs.kappa_v.sum()
- ) * 0
- return {"loss_densepose_UV": loss_uv}
- else:
- return super().produce_fake_densepose_losses_uv(densepose_predictor_outputs)
-
- def produce_densepose_losses_uv(
- self,
- proposals_with_gt: List[Instances],
- densepose_predictor_outputs: Any,
- packed_annotations: Any,
- interpolator: BilinearInterpolationHelper,
- j_valid_fg: torch.Tensor,
- ) -> LossDict:
- conf_type = self.confidence_model_cfg.uv_confidence.type
- if self.confidence_model_cfg.uv_confidence.enabled:
- u_gt = packed_annotations.u_gt[j_valid_fg]
- u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg]
- v_gt = packed_annotations.v_gt[j_valid_fg]
- v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg]
- sigma_2_est = interpolator.extract_at_points(densepose_predictor_outputs.sigma_2)[
- j_valid_fg
- ]
- if conf_type == DensePoseUVConfidenceType.IID_ISO:
- return {
- "loss_densepose_UV": (
- self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt)
- * self.w_points
- )
- }
- elif conf_type in [DensePoseUVConfidenceType.INDEP_ANISO]:
- kappa_u_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_u)[
- j_valid_fg
- ]
- kappa_v_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_v)[
- j_valid_fg
- ]
- return {
- "loss_densepose_UV": (
- self.uv_loss_with_confidences(
- u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt
- )
- * self.w_points
- )
- }
- return super().produce_densepose_losses_uv(
- proposals_with_gt,
- densepose_predictor_outputs,
- packed_annotations,
- interpolator,
- j_valid_fg,
- )
-
-
-class IIDIsotropicGaussianUVLoss(nn.Module):
- """
- Loss for the case of iid residuals with isotropic covariance:
- $Sigma_i = sigma_i^2 I$
- The loss (negative log likelihood) is then:
- $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
- where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
- difference between estimated and ground truth UV values
- For details, see:
- N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
- Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
- """
-
- def __init__(self, sigma_lower_bound: float):
- super(IIDIsotropicGaussianUVLoss, self).__init__()
- self.sigma_lower_bound = sigma_lower_bound
- self.log2pi = math.log(2 * math.pi)
-
- def forward(
- self,
- u: torch.Tensor,
- v: torch.Tensor,
- sigma_u: torch.Tensor,
- target_u: torch.Tensor,
- target_v: torch.Tensor,
- ):
- # compute $\sigma_i^2$
- # use sigma_lower_bound to avoid degenerate solution for variance
- # (sigma -> 0)
- sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
- # compute \|delta_i\|^2
- # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
- delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2
- # the total loss from the formula above:
- loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2)
- return loss.sum()
-
-
-class IndepAnisotropicGaussianUVLoss(nn.Module):
- """
- Loss for the case of independent residuals with anisotropic covariances:
- $Sigma_i = sigma_i^2 I + r_i r_i^T$
- The loss (negative log likelihood) is then:
- $1/2 sum_{i=1}^n (log(2 pi)
- + log sigma_i^2 (sigma_i^2 + ||r_i||^2)
- + ||delta_i||^2 / sigma_i^2
- - <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
- where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
- difference between estimated and ground truth UV values
- For details, see:
- N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
- Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
- """
-
- def __init__(self, sigma_lower_bound: float):
- super(IndepAnisotropicGaussianUVLoss, self).__init__()
- self.sigma_lower_bound = sigma_lower_bound
- self.log2pi = math.log(2 * math.pi)
-
- def forward(
- self,
- u: torch.Tensor,
- v: torch.Tensor,
- sigma_u: torch.Tensor,
- kappa_u_est: torch.Tensor,
- kappa_v_est: torch.Tensor,
- target_u: torch.Tensor,
- target_v: torch.Tensor,
- ):
- # compute $\sigma_i^2$
- sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
- # compute \|r_i\|^2
- # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
- r_sqnorm2 = kappa_u_est**2 + kappa_v_est**2
- delta_u = u - target_u
- delta_v = v - target_v
- # compute \|delta_i\|^2
- # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
- delta_sqnorm = delta_u**2 + delta_v**2
- delta_u_r_u = delta_u * kappa_u_est
- delta_v_r_v = delta_v * kappa_v_est
- # compute the scalar product
- delta_r = delta_u_r_u + delta_v_r_v
- # compute squared scalar product <delta_i, r_i>^2
- # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
- delta_r_sqnorm = delta_r**2
- denom2 = sigma2 * (sigma2 + r_sqnorm2)
- loss = 0.5 * (
- self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2
- )
- return loss.sum()
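
The `produce_fake_densepose_losses_uv` docstring above describes a common DistributedDataParallel trick: when a batch has no usable ground truth, every prediction tensor is still touched with a zero-valued term so all replicas build the same computation graph. A minimal standalone sketch of that pattern (hypothetical tensors, not DensePose code):

```python
import torch

def fake_uv_loss(u, v, sigma_2=None):
    """Zero-valued loss that still references every prediction tensor.

    The value is always 0, but because each tensor participates in the graph,
    backward() produces (zero) gradients for all of them, which keeps
    DistributedDataParallel's gradient reduction consistent across ranks.
    """
    loss = (u.sum() + v.sum()) * 0
    if sigma_2 is not None:
        loss = loss + sigma_2.sum() * 0
    return loss

u = torch.randn(4, requires_grad=True)
v = torch.randn(4, requires_grad=True)
fake_uv_loss(u, v).backward()
print(u.grad)   # tensor of zeros, not None
```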
diff --git a/spaces/cahya/indonesian-story/app/SessionState.py b/spaces/cahya/indonesian-story/app/SessionState.py
deleted file mode 100644
index 594c8dcbd209dae45555a7888af39be2c79fd9dc..0000000000000000000000000000000000000000
--- a/spaces/cahya/indonesian-story/app/SessionState.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""Hack to add per-session state to Streamlit.
-Usage
------
->>> import SessionState
->>>
->>> session_state = SessionState.get(user_name='', favorite_color='black')
->>> session_state.user_name
-''
->>> session_state.user_name = 'Mary'
->>> session_state.favorite_color
-'black'
-Since you set user_name above, next time your script runs this will be the
-result:
->>> session_state = get(user_name='', favorite_color='black')
->>> session_state.user_name
-'Mary'
-"""
-try:
- import streamlit.ReportThread as ReportThread
- from streamlit.server.Server import Server
-except Exception:
- # Streamlit >= 0.65.0
- import streamlit.report_thread as ReportThread
- from streamlit.server.server import Server
-
-
-class SessionState(object):
- def __init__(self, **kwargs):
- """A new SessionState object.
- Parameters
- ----------
- **kwargs : any
- Default values for the session state.
- Example
- -------
- >>> session_state = SessionState(user_name='', favorite_color='black')
- >>> session_state.user_name = 'Mary'
- ''
- >>> session_state.favorite_color
- 'black'
- """
- for key, val in kwargs.items():
- setattr(self, key, val)
-
-
-def get(**kwargs):
- """Gets a SessionState object for the current session.
- Creates a new object if necessary.
- Parameters
- ----------
- **kwargs : any
- Default values you want to add to the session state, if we're creating a
- new one.
- Example
- -------
- >>> session_state = get(user_name='', favorite_color='black')
- >>> session_state.user_name
- ''
- >>> session_state.user_name = 'Mary'
- >>> session_state.favorite_color
- 'black'
- Since you set user_name above, next time your script runs this will be the
- result:
- >>> session_state = get(user_name='', favorite_color='black')
- >>> session_state.user_name
- 'Mary'
- """
- # Hack to get the session object from Streamlit.
-
- ctx = ReportThread.get_report_ctx()
-
- this_session = None
-
- current_server = Server.get_current()
- if hasattr(current_server, '_session_infos'):
- # Streamlit < 0.56
- session_infos = Server.get_current()._session_infos.values()
- else:
- session_infos = Server.get_current()._session_info_by_id.values()
-
- for session_info in session_infos:
- s = session_info.session
- if (
- # Streamlit < 0.54.0
- (hasattr(s, '_main_dg') and s._main_dg == ctx.main_dg)
- or
- # Streamlit >= 0.54.0
- (not hasattr(s, '_main_dg') and s.enqueue == ctx.enqueue)
- or
- # Streamlit >= 0.65.2
- (not hasattr(s, '_main_dg') and s._uploaded_file_mgr == ctx.uploaded_file_mgr)
- ):
- this_session = s
-
- if this_session is None:
- raise RuntimeError(
- "Oh noes. Couldn't get your Streamlit Session object. "
- 'Are you doing something fancy with threads?')
-
- # Got the session object! Now let's attach some state into it.
-
- if not hasattr(this_session, '_custom_session_state'):
- this_session._custom_session_state = SessionState(**kwargs)
-
- return this_session._custom_session_state
-
-__all__ = ['get']
\ No newline at end of file
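
For readers unfamiliar with the pattern, the module above is typically used to persist small bits of state across Streamlit script reruns (in versions that predate the built-in `st.session_state`). A short usage sketch:

```python
import streamlit as st
import SessionState  # the module above

state = SessionState.get(counter=0)   # defaults are only applied on the first run

if st.button("Increment"):
    state.counter += 1                # survives the rerun triggered by the click

st.write("Button pressed", state.counter, "times")
```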
diff --git a/spaces/ceshine/t5-paraphrasing/README.md b/spaces/ceshine/t5-paraphrasing/README.md
deleted file mode 100644
index 0b6f56c6e8378551d2be3cb52a690490dbe483c5..0000000000000000000000000000000000000000
--- a/spaces/ceshine/t5-paraphrasing/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: T5 Paraphrasing
-emoji: 🦀
-colorFrom: indigo
-colorTo: purple
-sdk: gradio
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio`, `streamlit`, or `static`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
-Path is relative to the root of the repository.
-
-`models`: _List[string]_
-HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`datasets`: _List[string]_
-HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
-Will be parsed automatically from your code if not specified here.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/chansung/LLM-As-Chatbot/chats/__init__.py b/spaces/chansung/LLM-As-Chatbot/chats/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/chansung/LLM-As-Chatbot/chats/alpaca.py b/spaces/chansung/LLM-As-Chatbot/chats/alpaca.py
deleted file mode 100644
index bcbf8c175a4032ea5f43fd017bf25e528b69ce99..0000000000000000000000000000000000000000
--- a/spaces/chansung/LLM-As-Chatbot/chats/alpaca.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import copy
-import json
-import global_vars
-from chats import pre, post
-from pingpong import PingPong
-from gens.batch_gen import get_output_batch
-
-from pingpong.context import CtxLastWindowStrategy
-
-def build_prompts(ppmanager, user_message, global_context, win_size=3):
- dummy_ppm = copy.deepcopy(ppmanager)
-
- dummy_ppm.ctx = global_context
- for pingpong in dummy_ppm.pingpongs:
- pong = pingpong.pong
- first_sentence = pong.split("\n")[0]
- if first_sentence != "" and \
- pre.contains_image_markdown(first_sentence):
- pong = ' '.join(pong.split("\n")[1:]).strip()
- pingpong.pong = pong
-
- lws = CtxLastWindowStrategy(win_size)
-
- prompt = lws(dummy_ppm)
- return prompt
-
-def text_stream(ppmanager, streamer, model_thumbnail_tiny, model_type):
- count = 0
-
- for new_text in streamer:
- if count == 0:
- ppmanager.append_pong(f"***[{model_type}]***\n")
- count = count + 1
-
- ppmanager.append_pong(new_text)
- yield ppmanager, ppmanager.build_uis()
-
- yield ppmanager, ppmanager.build_uis()
-
-def summarize(
- ppmanager, prompt_to_summarize, win_size,
- temperature, top_p, top_k, repetition_penalty, max_new_tokens,
- num_beams, use_cache, do_sample, eos_token_id, pad_token_id
-):
- ctx = ppmanager.ctx
- last_pong = ppmanager.pingpongs[-1].pong
- ppmanager.add_pingpong(PingPong(prompt_to_summarize, ""))
- prompt = ppmanager.build_prompts(from_idx=-win_size)
-
- _, gen_config_summarization = pre.build_gen_config(
- temperature, top_p, top_k, repetition_penalty, max_new_tokens,
- num_beams, use_cache, do_sample, eos_token_id, pad_token_id
- )
- summarize_output = get_output_batch(
- global_vars.model, global_vars.tokenizer, [prompt], gen_config_summarization
- )[0].split("### Response:")[-1].strip()
- ppmanager.ctx = summarize_output
- ppmanager.pop_pingpong()
- return ppmanager
-
-def chat_stream(
- idx, local_data, user_message, state, model_num,
- global_context, ctx_num_lconv, ctx_sum_prompt,
- res_temp, res_topp, res_topk, res_rpen, res_mnts, res_beams, res_cache, res_sample, res_eosid, res_padid,
-):
- res = [
- state["ppmanager_type"].from_json(json.dumps(ppm))
- for ppm in local_data
- ]
-
- ppm = res[idx]
-
- # add_ping returns a prompt structured in Alpaca form
- ppm.add_pingpong(
- PingPong(user_message, "")
- )
- prompt = build_prompts(ppm, user_message, global_context, ctx_num_lconv)
-
- # prepare text generating streamer & start generating
- gen_kwargs, streamer = pre.build(
- prompt, model_num,
- res_temp, res_topp, res_topk, res_rpen, res_mnts,
- res_beams, res_cache, res_sample, res_eosid, res_padid,
- return_token_type_ids=False
- )
- pre.start_gen(gen_kwargs, model_num)
-
- model_thumbnail_tiny = global_vars.models[model_num]["model_thumb_tiny"]
- model_type = global_vars.models[model_num]["model_type"]
- for ppmanager, uis in text_stream(ppm, streamer, model_thumbnail_tiny, model_type):
- yield "", uis, prompt, str(res)
-
- ppm = post.strip_pong(ppm)
- yield "", ppm.build_uis(), prompt, str(res)
-
- # summarization
- # ppm.add_pingpong(
- # PingPong(None, "")
- # )
- # yield "", ppm.build_uis(), prompt, state
- # ppm.pop_pingpong()
-
- # ppm = summarize(
- # ppm, ctx_sum_prompt, ctx_num_lconv,
- # sum_temp, sum_topp, sum_topk, sum_rpen, sum_mnts,
- # sum_beams, sum_cache, sum_sample, sum_eosid, sum_padid
- # )
- yield "", ppm.build_uis(), prompt, str(res)
\ No newline at end of file
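
`build_prompts` above relies on `CtxLastWindowStrategy` to keep only the last few exchanges in the prompt and strips image-markdown lines from earlier model replies. A rough, dependency-free sketch of that "last window" idea in Alpaca form (illustrative only, not the pingpong library API):

```python
def last_window_prompt(context, turns, win_size=3):
    """Build an Alpaca-style prompt from the last `win_size` (user, assistant) turns."""
    parts = [context.strip(), ""]
    for user_msg, assistant_msg in turns[-win_size:]:
        parts.append(f"### Instruction:\n{user_msg}\n")
        parts.append(f"### Response:\n{assistant_msg}\n" if assistant_msg else "### Response:\n")
    return "\n".join(parts)

turns = [("hi", "hello!"), ("what is DCF?", "a valuation method..."), ("thanks", "")]
print(last_window_prompt("Below is an instruction. Write a response.", turns))
```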
diff --git a/spaces/cheetah003/HMMC_t2v_search/modules/__init__.py b/spaces/cheetah003/HMMC_t2v_search/modules/__init__.py
deleted file mode 100644
index 661b40d6f6b2c7b9ff06c0160b09aaaaa3b8354a..0000000000000000000000000000000000000000
--- a/spaces/cheetah003/HMMC_t2v_search/modules/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-#encoding:utf-8
\ No newline at end of file
diff --git a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/python/README.md b/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/python/README.md
deleted file mode 100644
index bbaf5aca44e86523c428735745848d2839351552..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/multimodal/YOLOX/demo/OpenVINO/python/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# YOLOX-OpenVINO in Python
-
-This tutorial includes a Python demo for OpenVINO, as well as some converted models.
-
-### Download OpenVINO models.
-
-| Model | Parameters | GFLOPs | Test Size | mAP | Weights |
-|:------| :----: | :----: | :---: | :---: | :---: |
-| [YOLOX-Nano](../../../exps/default/nano.py) | 0.91M | 1.08 | 416x416 | 25.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_nano_openvino.tar.gz) |
-| [YOLOX-Tiny](../../../exps/default/yolox_tiny.py) | 5.06M | 6.45 | 416x416 |32.8 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_tiny_openvino.tar.gz) |
-| [YOLOX-S](../../../exps/default/yolox_s.py) | 9.0M | 26.8 | 640x640 |40.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_s_openvino.tar.gz) |
-| [YOLOX-M](../../../exps/default/yolox_m.py) | 25.3M | 73.8 | 640x640 |47.2 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_m_openvino.tar.gz) |
-| [YOLOX-L](../../../exps/default/yolox_l.py) | 54.2M | 155.6 | 640x640 |50.1 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_l_openvino.tar.gz) |
-| [YOLOX-Darknet53](../../../exps/default/yolov3.py) | 63.72M | 185.3 | 640x640 |48.0 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_dark_openvino.tar.gz) |
-| [YOLOX-X](../../../exps/default/yolox_x.py) | 99.1M | 281.9 | 640x640 |51.5 | [github](https://github.com/Megvii-BaseDetection/YOLOX/releases/download/0.1.1rc0/yolox_x_openvino.tar.gz) |
-
-## Install OpenVINO Toolkit
-
-Please visit [Openvino Homepage](https://docs.openvinotoolkit.org/latest/get_started_guides.html) for more details.
-
-## Set up the Environment
-
-### For Linux
-
-**Option1. Set up the environment temporarily. You need to run this command every time you start a new shell window.**
-
-```shell
-source /opt/intel/openvino_2021/bin/setupvars.sh
-```
-
-**Option2. Set up the environment permanently.**
-
-*Step1.* For Linux:
-```shell
-vim ~/.bashrc
-```
-
-*Step2.* Add the following line into your file:
-
-```shell
-source /opt/intel/openvino_2021/bin/setupvars.sh
-```
-
-*Step3.* Save and exit the file, then run:
-
-```shell
-source ~/.bashrc
-```
-
-
-## Convert model
-
-1. Export ONNX model
-
- Please refer to the [ONNX tutorial](https://github.com/Megvii-BaseDetection/YOLOX/demo/ONNXRuntime). **Note that you should set --opset to 10, otherwise your next step will fail.**
-
-2. Convert ONNX to OpenVINO
-
- ``` shell
- cd <INSTALL_DIR>/openvino_2021/deployment_tools/model_optimizer
- ```
-
- Install the requirements for the conversion tool
-
- ```shell
- sudo ./install_prerequisites/install_prerequisites_onnx.sh
- ```
-
- Then convert model.
- ```shell
- python3 mo.py --input_model <ONNX_MODEL> --input_shape <INPUT_SHAPE> [--data_type FP16]
- ```
- For example:
- ```shell
- python3 mo.py --input_model yolox.onnx --input_shape [1,3,640,640] --data_type FP16 --output_dir converted_output
- ```
-
-## Demo
-
-### python
-
-```shell
-python openvino_inference.py -m <XML_MODEL_PATH> -i <IMAGE_PATH>
-```
-or
-```shell
-python openvino_inference.py -m <XML_MODEL_PATH> -i <IMAGE_PATH> -o <OUTPUT_DIR> -s <SCORE_THR> -d <DEVICE>
-```
-
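
If you prefer to script inference instead of using the demo above, the 2021-era Inference Engine Python API can load the converted IR roughly as follows. This is a sketch under the assumption of an OpenVINO 2021 install; the file paths are placeholders, and the naive resize stands in for YOLOX's own letterbox preprocessing:

```python
import cv2
import numpy as np
from openvino.inference_engine import IECore  # OpenVINO 2021.x Python API

ie = IECore()
net = ie.read_network(model="converted_output/yolox.xml",
                      weights="converted_output/yolox.bin")
exec_net = ie.load_network(network=net, device_name="CPU")

input_blob = next(iter(net.input_info))
_, _, h, w = net.input_info[input_blob].input_data.shape

image = cv2.imread("dog.jpg")
# Naive resize to NCHW float32; the real demo uses padded letterbox preprocessing.
blob = cv2.resize(image, (w, h)).transpose(2, 0, 1)[np.newaxis].astype(np.float32)

outputs = exec_net.infer(inputs={input_blob: blob})
print({name: out.shape for name, out in outputs.items()})
```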
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py
deleted file mode 100644
index 27cb89f735e2a1883b2b52ee42fd9ba34c5805fb..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcnsImagePlugin.py
+++ /dev/null
@@ -1,399 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# macOS icns file decoder, based on icns.py by Bob Ippolito.
-#
-# history:
-# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies.
-# 2020-04-04 Allow saving on all operating systems.
-#
-# Copyright (c) 2004 by Bob Ippolito.
-# Copyright (c) 2004 by Secret Labs.
-# Copyright (c) 2004 by Fredrik Lundh.
-# Copyright (c) 2014 by Alastair Houghton.
-# Copyright (c) 2020 by Pan Jing.
-#
-# See the README file for information on usage and redistribution.
-#
-
-import io
-import os
-import struct
-import sys
-
-from . import Image, ImageFile, PngImagePlugin, features
-
-enable_jpeg2k = features.check_codec("jpg_2000")
-if enable_jpeg2k:
- from . import Jpeg2KImagePlugin
-
-MAGIC = b"icns"
-HEADERSIZE = 8
-
-
-def nextheader(fobj):
- return struct.unpack(">4sI", fobj.read(HEADERSIZE))
-
-
-def read_32t(fobj, start_length, size):
- # The 128x128 icon seems to have an extra header for some reason.
- (start, length) = start_length
- fobj.seek(start)
- sig = fobj.read(4)
- if sig != b"\x00\x00\x00\x00":
- msg = "Unknown signature, expecting 0x00000000"
- raise SyntaxError(msg)
- return read_32(fobj, (start + 4, length - 4), size)
-
-
-def read_32(fobj, start_length, size):
- """
- Read a 32bit RGB icon resource. Seems to be either uncompressed or
- an RLE packbits-like scheme.
- """
- (start, length) = start_length
- fobj.seek(start)
- pixel_size = (size[0] * size[2], size[1] * size[2])
- sizesq = pixel_size[0] * pixel_size[1]
- if length == sizesq * 3:
- # uncompressed ("RGBRGBGB")
- indata = fobj.read(length)
- im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
- else:
- # decode image
- im = Image.new("RGB", pixel_size, None)
- for band_ix in range(3):
- data = []
- bytesleft = sizesq
- while bytesleft > 0:
- byte = fobj.read(1)
- if not byte:
- break
- byte = byte[0]
- if byte & 0x80:
- blocksize = byte - 125
- byte = fobj.read(1)
- for i in range(blocksize):
- data.append(byte)
- else:
- blocksize = byte + 1
- data.append(fobj.read(blocksize))
- bytesleft -= blocksize
- if bytesleft <= 0:
- break
- if bytesleft != 0:
- msg = f"Error reading channel [{repr(bytesleft)} left]"
- raise SyntaxError(msg)
- band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1)
- im.im.putband(band.im, band_ix)
- return {"RGB": im}
-
-
-def read_mk(fobj, start_length, size):
- # Alpha masks seem to be uncompressed
- start = start_length[0]
- fobj.seek(start)
- pixel_size = (size[0] * size[2], size[1] * size[2])
- sizesq = pixel_size[0] * pixel_size[1]
- band = Image.frombuffer("L", pixel_size, fobj.read(sizesq), "raw", "L", 0, 1)
- return {"A": band}
-
-
-def read_png_or_jpeg2000(fobj, start_length, size):
- (start, length) = start_length
- fobj.seek(start)
- sig = fobj.read(12)
- if sig[:8] == b"\x89PNG\x0d\x0a\x1a\x0a":
- fobj.seek(start)
- im = PngImagePlugin.PngImageFile(fobj)
- Image._decompression_bomb_check(im.size)
- return {"RGBA": im}
- elif (
- sig[:4] == b"\xff\x4f\xff\x51"
- or sig[:4] == b"\x0d\x0a\x87\x0a"
- or sig == b"\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a"
- ):
- if not enable_jpeg2k:
- msg = (
- "Unsupported icon subimage format (rebuild PIL "
- "with JPEG 2000 support to fix this)"
- )
- raise ValueError(msg)
- # j2k, jpc or j2c
- fobj.seek(start)
- jp2kstream = fobj.read(length)
- f = io.BytesIO(jp2kstream)
- im = Jpeg2KImagePlugin.Jpeg2KImageFile(f)
- Image._decompression_bomb_check(im.size)
- if im.mode != "RGBA":
- im = im.convert("RGBA")
- return {"RGBA": im}
- else:
- msg = "Unsupported icon subimage format"
- raise ValueError(msg)
-
-
-class IcnsFile:
- SIZES = {
- (512, 512, 2): [(b"ic10", read_png_or_jpeg2000)],
- (512, 512, 1): [(b"ic09", read_png_or_jpeg2000)],
- (256, 256, 2): [(b"ic14", read_png_or_jpeg2000)],
- (256, 256, 1): [(b"ic08", read_png_or_jpeg2000)],
- (128, 128, 2): [(b"ic13", read_png_or_jpeg2000)],
- (128, 128, 1): [
- (b"ic07", read_png_or_jpeg2000),
- (b"it32", read_32t),
- (b"t8mk", read_mk),
- ],
- (64, 64, 1): [(b"icp6", read_png_or_jpeg2000)],
- (32, 32, 2): [(b"ic12", read_png_or_jpeg2000)],
- (48, 48, 1): [(b"ih32", read_32), (b"h8mk", read_mk)],
- (32, 32, 1): [
- (b"icp5", read_png_or_jpeg2000),
- (b"il32", read_32),
- (b"l8mk", read_mk),
- ],
- (16, 16, 2): [(b"ic11", read_png_or_jpeg2000)],
- (16, 16, 1): [
- (b"icp4", read_png_or_jpeg2000),
- (b"is32", read_32),
- (b"s8mk", read_mk),
- ],
- }
-
- def __init__(self, fobj):
- """
- fobj is a file-like object as an icns resource
- """
- # signature : (start, length)
- self.dct = dct = {}
- self.fobj = fobj
- sig, filesize = nextheader(fobj)
- if not _accept(sig):
- msg = "not an icns file"
- raise SyntaxError(msg)
- i = HEADERSIZE
- while i < filesize:
- sig, blocksize = nextheader(fobj)
- if blocksize <= 0:
- msg = "invalid block header"
- raise SyntaxError(msg)
- i += HEADERSIZE
- blocksize -= HEADERSIZE
- dct[sig] = (i, blocksize)
- fobj.seek(blocksize, io.SEEK_CUR)
- i += blocksize
-
- def itersizes(self):
- sizes = []
- for size, fmts in self.SIZES.items():
- for fmt, reader in fmts:
- if fmt in self.dct:
- sizes.append(size)
- break
- return sizes
-
- def bestsize(self):
- sizes = self.itersizes()
- if not sizes:
- msg = "No 32bit icon resources found"
- raise SyntaxError(msg)
- return max(sizes)
-
- def dataforsize(self, size):
- """
- Get an icon resource as {channel: array}. Note that
- the arrays are bottom-up like windows bitmaps and will likely
- need to be flipped or transposed in some way.
- """
- dct = {}
- for code, reader in self.SIZES[size]:
- desc = self.dct.get(code)
- if desc is not None:
- dct.update(reader(self.fobj, desc, size))
- return dct
-
- def getimage(self, size=None):
- if size is None:
- size = self.bestsize()
- if len(size) == 2:
- size = (size[0], size[1], 1)
- channels = self.dataforsize(size)
-
- im = channels.get("RGBA", None)
- if im:
- return im
-
- im = channels.get("RGB").copy()
- try:
- im.putalpha(channels["A"])
- except KeyError:
- pass
- return im
-
-
-##
-# Image plugin for Mac OS icons.
-
-
-class IcnsImageFile(ImageFile.ImageFile):
- """
- PIL image support for Mac OS .icns files.
- Chooses the best resolution, but will possibly load
- a different size image if you mutate the size attribute
- before calling 'load'.
-
- The info dictionary has a key 'sizes' that is a list
- of sizes that the icns file has.
- """
-
- format = "ICNS"
- format_description = "Mac OS icns resource"
-
- def _open(self):
- self.icns = IcnsFile(self.fp)
- self.mode = "RGBA"
- self.info["sizes"] = self.icns.itersizes()
- self.best_size = self.icns.bestsize()
- self.size = (
- self.best_size[0] * self.best_size[2],
- self.best_size[1] * self.best_size[2],
- )
-
- @property
- def size(self):
- return self._size
-
- @size.setter
- def size(self, value):
- info_size = value
- if info_size not in self.info["sizes"] and len(info_size) == 2:
- info_size = (info_size[0], info_size[1], 1)
- if (
- info_size not in self.info["sizes"]
- and len(info_size) == 3
- and info_size[2] == 1
- ):
- simple_sizes = [
- (size[0] * size[2], size[1] * size[2]) for size in self.info["sizes"]
- ]
- if value in simple_sizes:
- info_size = self.info["sizes"][simple_sizes.index(value)]
- if info_size not in self.info["sizes"]:
- msg = "This is not one of the allowed sizes of this image"
- raise ValueError(msg)
- self._size = value
-
- def load(self):
- if len(self.size) == 3:
- self.best_size = self.size
- self.size = (
- self.best_size[0] * self.best_size[2],
- self.best_size[1] * self.best_size[2],
- )
-
- px = Image.Image.load(self)
- if self.im is not None and self.im.size == self.size:
- # Already loaded
- return px
- self.load_prepare()
- # This is likely NOT the best way to do it, but whatever.
- im = self.icns.getimage(self.best_size)
-
- # If this is a PNG or JPEG 2000, it won't be loaded yet
- px = im.load()
-
- self.im = im.im
- self.mode = im.mode
- self.size = im.size
-
- return px
-
-
-def _save(im, fp, filename):
- """
- Saves the image as a series of PNG files,
- that are then combined into a .icns file.
- """
- if hasattr(fp, "flush"):
- fp.flush()
-
- sizes = {
- b"ic07": 128,
- b"ic08": 256,
- b"ic09": 512,
- b"ic10": 1024,
- b"ic11": 32,
- b"ic12": 64,
- b"ic13": 256,
- b"ic14": 512,
- }
- provided_images = {im.width: im for im in im.encoderinfo.get("append_images", [])}
- size_streams = {}
- for size in set(sizes.values()):
- image = (
- provided_images[size]
- if size in provided_images
- else im.resize((size, size))
- )
-
- temp = io.BytesIO()
- image.save(temp, "png")
- size_streams[size] = temp.getvalue()
-
- entries = []
- for type, size in sizes.items():
- stream = size_streams[size]
- entries.append(
- {"type": type, "size": HEADERSIZE + len(stream), "stream": stream}
- )
-
- # Header
- fp.write(MAGIC)
- file_length = HEADERSIZE # Header
- file_length += HEADERSIZE + 8 * len(entries) # TOC
- file_length += sum(entry["size"] for entry in entries)
- fp.write(struct.pack(">i", file_length))
-
- # TOC
- fp.write(b"TOC ")
- fp.write(struct.pack(">i", HEADERSIZE + len(entries) * HEADERSIZE))
- for entry in entries:
- fp.write(entry["type"])
- fp.write(struct.pack(">i", entry["size"]))
-
- # Data
- for entry in entries:
- fp.write(entry["type"])
- fp.write(struct.pack(">i", entry["size"]))
- fp.write(entry["stream"])
-
- if hasattr(fp, "flush"):
- fp.flush()
-
-
-def _accept(prefix):
- return prefix[:4] == MAGIC
-
-
-Image.register_open(IcnsImageFile.format, IcnsImageFile, _accept)
-Image.register_extension(IcnsImageFile.format, ".icns")
-
-Image.register_save(IcnsImageFile.format, _save)
-Image.register_mime(IcnsImageFile.format, "image/icns")
-
-if __name__ == "__main__":
- if len(sys.argv) < 2:
- print("Syntax: python3 IcnsImagePlugin.py [file]")
- sys.exit()
-
- with open(sys.argv[1], "rb") as fp:
- imf = IcnsImageFile(fp)
- for size in imf.info["sizes"]:
- imf.size = size
- imf.save("out-%s-%s-%s.png" % size)
- with Image.open(sys.argv[1]) as im:
- im.save("out.png")
- if sys.platform == "windows":
- os.startfile("out.png")
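
Since `_save` above simply encodes a handful of PNG sub-icons and wraps them in an icns container, a save from user code can look like the following sketch (filenames are hypothetical); exact renditions passed via `append_images` are used verbatim for their sizes, everything else is resized from the base image:

```python
from PIL import Image

base = Image.open("logo-1024.png").convert("RGBA")

base.save(
    "logo.icns",
    append_images=[base.resize((512, 512)), base.resize((256, 256))],
)

with Image.open("logo.icns") as icon:
    print(icon.info["sizes"])   # e.g. [(512, 512, 2), (512, 512, 1), ...]
```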
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcoImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcoImagePlugin.py
deleted file mode 100644
index a188f8fdcea46e5cb9423a3c4572d88d93890fc6..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/IcoImagePlugin.py
+++ /dev/null
@@ -1,358 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# Windows Icon support for PIL
-#
-# History:
-# 96-05-27 fl Created
-#
-# Copyright (c) Secret Labs AB 1997.
-# Copyright (c) Fredrik Lundh 1996.
-#
-# See the README file for information on usage and redistribution.
-#
-
-# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
-# <casadebender@gmail.com>.
-# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
-#
-# Icon format references:
-# * https://en.wikipedia.org/wiki/ICO_(file_format)
-# * https://msdn.microsoft.com/en-us/library/ms997538.aspx
-
-
-import warnings
-from io import BytesIO
-from math import ceil, log
-
-from . import BmpImagePlugin, Image, ImageFile, PngImagePlugin
-from ._binary import i16le as i16
-from ._binary import i32le as i32
-from ._binary import o8
-from ._binary import o16le as o16
-from ._binary import o32le as o32
-
-#
-# --------------------------------------------------------------------
-
-_MAGIC = b"\0\0\1\0"
-
-
-def _save(im, fp, filename):
- fp.write(_MAGIC) # (2+2)
- bmp = im.encoderinfo.get("bitmap_format") == "bmp"
- sizes = im.encoderinfo.get(
- "sizes",
- [(16, 16), (24, 24), (32, 32), (48, 48), (64, 64), (128, 128), (256, 256)],
- )
- frames = []
- provided_ims = [im] + im.encoderinfo.get("append_images", [])
- width, height = im.size
- for size in sorted(set(sizes)):
- if size[0] > width or size[1] > height or size[0] > 256 or size[1] > 256:
- continue
-
- for provided_im in provided_ims:
- if provided_im.size != size:
- continue
- frames.append(provided_im)
- if bmp:
- bits = BmpImagePlugin.SAVE[provided_im.mode][1]
- bits_used = [bits]
- for other_im in provided_ims:
- if other_im.size != size:
- continue
- bits = BmpImagePlugin.SAVE[other_im.mode][1]
- if bits not in bits_used:
- # Another image has been supplied for this size
- # with a different bit depth
- frames.append(other_im)
- bits_used.append(bits)
- break
- else:
- # TODO: invent a more convenient method for proportional scalings
- frame = provided_im.copy()
- frame.thumbnail(size, Image.Resampling.LANCZOS, reducing_gap=None)
- frames.append(frame)
- fp.write(o16(len(frames))) # idCount(2)
- offset = fp.tell() + len(frames) * 16
- for frame in frames:
- width, height = frame.size
- # 0 means 256
- fp.write(o8(width if width < 256 else 0)) # bWidth(1)
- fp.write(o8(height if height < 256 else 0)) # bHeight(1)
-
- bits, colors = BmpImagePlugin.SAVE[frame.mode][1:] if bmp else (32, 0)
- fp.write(o8(colors)) # bColorCount(1)
- fp.write(b"\0") # bReserved(1)
- fp.write(b"\0\0") # wPlanes(2)
- fp.write(o16(bits)) # wBitCount(2)
-
- image_io = BytesIO()
- if bmp:
- frame.save(image_io, "dib")
-
- if bits != 32:
- and_mask = Image.new("1", size)
- ImageFile._save(
- and_mask, image_io, [("raw", (0, 0) + size, 0, ("1", 0, -1))]
- )
- else:
- frame.save(image_io, "png")
- image_io.seek(0)
- image_bytes = image_io.read()
- if bmp:
- image_bytes = image_bytes[:8] + o32(height * 2) + image_bytes[12:]
- bytes_len = len(image_bytes)
- fp.write(o32(bytes_len)) # dwBytesInRes(4)
- fp.write(o32(offset)) # dwImageOffset(4)
- current = fp.tell()
- fp.seek(offset)
- fp.write(image_bytes)
- offset = offset + bytes_len
- fp.seek(current)
-
-
-def _accept(prefix):
- return prefix[:4] == _MAGIC
-
-
-class IcoFile:
- def __init__(self, buf):
- """
- Parse image from file-like object containing ico file data
- """
-
- # check magic
- s = buf.read(6)
- if not _accept(s):
- msg = "not an ICO file"
- raise SyntaxError(msg)
-
- self.buf = buf
- self.entry = []
-
- # Number of items in file
- self.nb_items = i16(s, 4)
-
- # Get headers for each item
- for i in range(self.nb_items):
- s = buf.read(16)
-
- icon_header = {
- "width": s[0],
- "height": s[1],
- "nb_color": s[2], # No. of colors in image (0 if >=8bpp)
- "reserved": s[3],
- "planes": i16(s, 4),
- "bpp": i16(s, 6),
- "size": i32(s, 8),
- "offset": i32(s, 12),
- }
-
- # See Wikipedia
- for j in ("width", "height"):
- if not icon_header[j]:
- icon_header[j] = 256
-
- # See Wikipedia notes about color depth.
- # We need this just to differ images with equal sizes
- icon_header["color_depth"] = (
- icon_header["bpp"]
- or (
- icon_header["nb_color"] != 0
- and ceil(log(icon_header["nb_color"], 2))
- )
- or 256
- )
-
- icon_header["dim"] = (icon_header["width"], icon_header["height"])
- icon_header["square"] = icon_header["width"] * icon_header["height"]
-
- self.entry.append(icon_header)
-
- self.entry = sorted(self.entry, key=lambda x: x["color_depth"])
- # ICO images are usually squares
- # self.entry = sorted(self.entry, key=lambda x: x['width'])
- self.entry = sorted(self.entry, key=lambda x: x["square"])
- self.entry.reverse()
-
- def sizes(self):
- """
- Get a list of all available icon sizes and color depths.
- """
- return {(h["width"], h["height"]) for h in self.entry}
-
- def getentryindex(self, size, bpp=False):
- for i, h in enumerate(self.entry):
- if size == h["dim"] and (bpp is False or bpp == h["color_depth"]):
- return i
- return 0
-
- def getimage(self, size, bpp=False):
- """
- Get an image from the icon
- """
- return self.frame(self.getentryindex(size, bpp))
-
- def frame(self, idx):
- """
- Get an image from frame idx
- """
-
- header = self.entry[idx]
-
- self.buf.seek(header["offset"])
- data = self.buf.read(8)
- self.buf.seek(header["offset"])
-
- if data[:8] == PngImagePlugin._MAGIC:
- # png frame
- im = PngImagePlugin.PngImageFile(self.buf)
- Image._decompression_bomb_check(im.size)
- else:
- # XOR + AND mask bmp frame
- im = BmpImagePlugin.DibImageFile(self.buf)
- Image._decompression_bomb_check(im.size)
-
- # change tile dimension to only encompass XOR image
- im._size = (im.size[0], int(im.size[1] / 2))
- d, e, o, a = im.tile[0]
- im.tile[0] = d, (0, 0) + im.size, o, a
-
- # figure out where AND mask image starts
- bpp = header["bpp"]
- if 32 == bpp:
- # 32-bit color depth icon image allows semitransparent areas
- # PIL's DIB format ignores transparency bits, recover them.
- # The DIB is packed in BGRX byte order where X is the alpha
- # channel.
-
- # Back up to start of bmp data
- self.buf.seek(o)
- # extract every 4th byte (eg. 3,7,11,15,...)
- alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]
-
- # convert to an 8bpp grayscale image
- mask = Image.frombuffer(
- "L", # 8bpp
- im.size, # (w, h)
- alpha_bytes, # source chars
- "raw", # raw decoder
- ("L", 0, -1), # 8bpp inverted, unpadded, reversed
- )
- else:
- # get AND image from end of bitmap
- w = im.size[0]
- if (w % 32) > 0:
- # bitmap row data is aligned to word boundaries
- w += 32 - (im.size[0] % 32)
-
- # the total mask data is
- # padded row size * height / bits per char
-
- total_bytes = int((w * im.size[1]) / 8)
- and_mask_offset = header["offset"] + header["size"] - total_bytes
-
- self.buf.seek(and_mask_offset)
- mask_data = self.buf.read(total_bytes)
-
- # convert raw data to image
- mask = Image.frombuffer(
- "1", # 1 bpp
- im.size, # (w, h)
- mask_data, # source chars
- "raw", # raw decoder
- ("1;I", int(w / 8), -1), # 1bpp inverted, padded, reversed
- )
-
- # now we have two images, im is XOR image and mask is AND image
-
- # apply mask image as alpha channel
- im = im.convert("RGBA")
- im.putalpha(mask)
-
- return im
-
-
-##
-# Image plugin for Windows Icon files.
-
-
-class IcoImageFile(ImageFile.ImageFile):
- """
- PIL read-only image support for Microsoft Windows .ico files.
-
- By default the largest resolution image in the file will be loaded. This
- can be changed by altering the 'size' attribute before calling 'load'.
-
- The info dictionary has a key 'sizes' that is a list of the sizes available
- in the icon file.
-
- Handles classic, XP and Vista icon formats.
-
- When saving, PNG compression is used. Support for this was only added in
- Windows Vista. If you are unable to view the icon in Windows, convert the
- image to "RGBA" mode before saving.
-
- This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
- <casadebender@gmail.com>.
- https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
- """
-
- format = "ICO"
- format_description = "Windows Icon"
-
- def _open(self):
- self.ico = IcoFile(self.fp)
- self.info["sizes"] = self.ico.sizes()
- self.size = self.ico.entry[0]["dim"]
- self.load()
-
- @property
- def size(self):
- return self._size
-
- @size.setter
- def size(self, value):
- if value not in self.info["sizes"]:
- msg = "This is not one of the allowed sizes of this image"
- raise ValueError(msg)
- self._size = value
-
- def load(self):
- if self.im is not None and self.im.size == self.size:
- # Already loaded
- return Image.Image.load(self)
- im = self.ico.getimage(self.size)
- # if tile is PNG, it won't really be loaded yet
- im.load()
- self.im = im.im
- self.pyaccess = None
- self.mode = im.mode
- if im.size != self.size:
- warnings.warn("Image was not the expected size")
-
- index = self.ico.getentryindex(self.size)
- sizes = list(self.info["sizes"])
- sizes[index] = im.size
- self.info["sizes"] = set(sizes)
-
- self.size = im.size
-
- def load_seek(self):
- # Flag the ImageFile.Parser so that it
- # just does all the decode at the end.
- pass
-
-
-#
-# --------------------------------------------------------------------
-
-
-Image.register_open(IcoImageFile.format, IcoImageFile, _accept)
-Image.register_save(IcoImageFile.format, _save)
-Image.register_extension(IcoImageFile.format, ".ico")
-
-Image.register_mime(IcoImageFile.format, "image/x-icon")
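
In day-to-day use the plugin above is driven through the normal `Image.open` / `save` API. A short usage sketch with hypothetical filenames, following the docstring's note that the largest frame loads by default and other sizes can be selected before loading:

```python
from PIL import Image

# Reading: the largest frame is loaded by default; other sizes are listed in info.
with Image.open("favicon.ico") as ico:
    print(ico.info["sizes"])          # e.g. {(16, 16), (32, 32), (48, 48)}
    ico.size = (16, 16)               # must be one of the advertised sizes
    small = ico.copy()                # triggers a reload at the selected size

# Writing: a PNG-compressed frame is generated for each requested size.
logo = Image.open("logo.png").convert("RGBA")
logo.save("logo.ico", sizes=[(16, 16), (32, 32), (48, 48), (256, 256)])
```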
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/MpoImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/MpoImagePlugin.py
deleted file mode 100644
index f9261c77d6862d7def90c6136dff6449241b0690..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/MpoImagePlugin.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# MPO file handling
-#
-# See "Multi-Picture Format" (CIPA DC-007-Translation 2009, Standard of the
-# Camera & Imaging Products Association)
-#
-# The multi-picture object combines multiple JPEG images (with a modified EXIF
-# data format) into a single file. While it can theoretically be used much like
-# a GIF animation, it is commonly used to represent 3D photographs and is (as
-# of this writing) the most commonly used format by 3D cameras.
-#
-# History:
-# 2014-03-13 Feneric Created
-#
-# See the README file for information on usage and redistribution.
-#
-
-import itertools
-import os
-import struct
-
-from . import (
- ExifTags,
- Image,
- ImageFile,
- ImageSequence,
- JpegImagePlugin,
- TiffImagePlugin,
-)
-from ._binary import i16be as i16
-from ._binary import o32le
-
-# def _accept(prefix):
-# return JpegImagePlugin._accept(prefix)
-
-
-def _save(im, fp, filename):
- JpegImagePlugin._save(im, fp, filename)
-
-
-def _save_all(im, fp, filename):
- append_images = im.encoderinfo.get("append_images", [])
- if not append_images:
- try:
- animated = im.is_animated
- except AttributeError:
- animated = False
- if not animated:
- _save(im, fp, filename)
- return
-
- mpf_offset = 28
- offsets = []
- for imSequence in itertools.chain([im], append_images):
- for im_frame in ImageSequence.Iterator(imSequence):
- if not offsets:
- # APP2 marker
- im_frame.encoderinfo["extra"] = (
- b"\xFF\xE2" + struct.pack(">H", 6 + 82) + b"MPF\0" + b" " * 82
- )
- exif = im_frame.encoderinfo.get("exif")
- if isinstance(exif, Image.Exif):
- exif = exif.tobytes()
- im_frame.encoderinfo["exif"] = exif
- if exif:
- mpf_offset += 4 + len(exif)
-
- JpegImagePlugin._save(im_frame, fp, filename)
- offsets.append(fp.tell())
- else:
- im_frame.save(fp, "JPEG")
- offsets.append(fp.tell() - offsets[-1])
-
- ifd = TiffImagePlugin.ImageFileDirectory_v2()
- ifd[0xB000] = b"0100"
- ifd[0xB001] = len(offsets)
-
- mpentries = b""
- data_offset = 0
- for i, size in enumerate(offsets):
- if i == 0:
- mptype = 0x030000 # Baseline MP Primary Image
- else:
- mptype = 0x000000 # Undefined
- mpentries += struct.pack(" 1
- self._fp = self.fp # FIXME: hack
- self._fp.seek(self.__mpoffsets[0]) # get ready to read first frame
- self.__frame = 0
- self.offset = 0
- # for now we can only handle reading and individual frame extraction
- self.readonly = 1
-
- def load_seek(self, pos):
- self._fp.seek(pos)
-
- def seek(self, frame):
- if not self._seek_check(frame):
- return
- self.fp = self._fp
- self.offset = self.__mpoffsets[frame]
-
- self.fp.seek(self.offset + 2) # skip SOI marker
- segment = self.fp.read(2)
- if not segment:
- msg = "No data found for frame"
- raise ValueError(msg)
- self._size = self._initial_size
- if i16(segment) == 0xFFE1: # APP1
- n = i16(self.fp.read(2)) - 2
- self.info["exif"] = ImageFile._safe_read(self.fp, n)
- self._reload_exif()
-
- mptype = self.mpinfo[0xB002][frame]["Attribute"]["MPType"]
- if mptype.startswith("Large Thumbnail"):
- exif = self.getexif().get_ifd(ExifTags.IFD.Exif)
- if 40962 in exif and 40963 in exif:
- self._size = (exif[40962], exif[40963])
- elif "exif" in self.info:
- del self.info["exif"]
- self._reload_exif()
-
- self.tile = [("jpeg", (0, 0) + self.size, self.offset, (self.mode, ""))]
- self.__frame = frame
-
- def tell(self):
- return self.__frame
-
- @staticmethod
- def adopt(jpeg_instance, mpheader=None):
- """
- Transform the instance of JpegImageFile into
- an instance of MpoImageFile.
- After the call, the JpegImageFile is extended
- to be an MpoImageFile.
-
- This is essentially useful when opening a JPEG
- file that reveals itself as an MPO, to avoid
- double call to _open.
- """
- jpeg_instance.__class__ = MpoImageFile
- jpeg_instance._after_jpeg_open(mpheader)
- return jpeg_instance
-
-
-# ---------------------------------------------------------------------
-# Registry stuff
-
-# Note that since MPO shares a factory with JPEG, we do not need to do a
-# separate registration for it here.
-# Image.register_open(MpoImageFile.format,
-# JpegImagePlugin.jpeg_factory, _accept)
-Image.register_save(MpoImageFile.format, _save)
-Image.register_save_all(MpoImageFile.format, _save_all)
-
-Image.register_extension(MpoImageFile.format, ".mpo")
-
-Image.register_mime(MpoImageFile.format, "image/mpo")
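
Since MPO files open through the shared JPEG factory and then adopt this class, iterating over the stored pictures works like any other multi-frame image in Pillow. A usage sketch with a hypothetical filename:

```python
from PIL import Image, ImageSequence

with Image.open("stereo_pair.mpo") as im:
    print(im.format, im.n_frames, im.is_animated)   # 'MPO', e.g. 2, True
    for index, frame in enumerate(ImageSequence.Iterator(im)):
        frame.convert("RGB").save(f"frame_{index}.jpg")
```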
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/settings.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/settings.py
deleted file mode 100644
index fd319ad707f3f5e8313bd6e94ed7f17f811f2787..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/docx/oxml/settings.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# encoding: utf-8
-
-"""Custom element classes related to document settings"""
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-from docx.oxml.xmlchemy import BaseOxmlElement, ZeroOrOne
-
-
-class CT_Settings(BaseOxmlElement):
- """`w:settings` element, root element for the settings part"""
-
- _tag_seq = (
- "w:writeProtection", "w:view", "w:zoom", "w:removePersonalInformation",
- "w:removeDateAndTime", "w:doNotDisplayPageBoundaries",
- "w:displayBackgroundShape", "w:printPostScriptOverText",
- "w:printFractionalCharacterWidth", "w:printFormsData", "w:embedTrueTypeFonts",
- "w:embedSystemFonts", "w:saveSubsetFonts", "w:saveFormsData", "w:mirrorMargins",
- "w:alignBordersAndEdges", "w:bordersDoNotSurroundHeader",
- "w:bordersDoNotSurroundFooter", "w:gutterAtTop", "w:hideSpellingErrors",
- "w:hideGrammaticalErrors", "w:activeWritingStyle", "w:proofState",
- "w:formsDesign", "w:attachedTemplate", "w:linkStyles",
- "w:stylePaneFormatFilter", "w:stylePaneSortMethod", "w:documentType",
- "w:mailMerge", "w:revisionView", "w:trackRevisions", "w:doNotTrackMoves",
- "w:doNotTrackFormatting", "w:documentProtection", "w:autoFormatOverride",
- "w:styleLockTheme", "w:styleLockQFSet", "w:defaultTabStop", "w:autoHyphenation",
- "w:consecutiveHyphenLimit", "w:hyphenationZone", "w:doNotHyphenateCaps",
- "w:showEnvelope", "w:summaryLength", "w:clickAndTypeStyle",
- "w:defaultTableStyle", "w:evenAndOddHeaders", "w:bookFoldRevPrinting",
- "w:bookFoldPrinting", "w:bookFoldPrintingSheets",
- "w:drawingGridHorizontalSpacing", "w:drawingGridVerticalSpacing",
- "w:displayHorizontalDrawingGridEvery", "w:displayVerticalDrawingGridEvery",
- "w:doNotUseMarginsForDrawingGridOrigin", "w:drawingGridHorizontalOrigin",
- "w:drawingGridVerticalOrigin", "w:doNotShadeFormData", "w:noPunctuationKerning",
- "w:characterSpacingControl", "w:printTwoOnOne", "w:strictFirstAndLastChars",
- "w:noLineBreaksAfter", "w:noLineBreaksBefore", "w:savePreviewPicture",
- "w:doNotValidateAgainstSchema", "w:saveInvalidXml", "w:ignoreMixedContent",
- "w:alwaysShowPlaceholderText", "w:doNotDemarcateInvalidXml",
- "w:saveXmlDataOnly", "w:useXSLTWhenSaving", "w:saveThroughXslt",
- "w:showXMLTags", "w:alwaysMergeEmptyNamespace", "w:updateFields",
- "w:hdrShapeDefaults", "w:footnotePr", "w:endnotePr", "w:compat", "w:docVars",
- "w:rsids", "m:mathPr", "w:attachedSchema", "w:themeFontLang",
- "w:clrSchemeMapping", "w:doNotIncludeSubdocsInStats",
- "w:doNotAutoCompressPictures", "w:forceUpgrade", "w:captions",
- "w:readModeInkLockDown", "w:smartTagType", "sl:schemaLibrary",
- "w:shapeDefaults", "w:doNotEmbedSmartTags", "w:decimalSymbol", "w:listSeparator"
- )
- evenAndOddHeaders = ZeroOrOne("w:evenAndOddHeaders", successors=_tag_seq[48:])
- del _tag_seq
-
- @property
- def evenAndOddHeaders_val(self):
- """value of `w:evenAndOddHeaders/@w:val` or |None| if not present."""
- evenAndOddHeaders = self.evenAndOddHeaders
- if evenAndOddHeaders is None:
- return False
- return evenAndOddHeaders.val
-
- @evenAndOddHeaders_val.setter
- def evenAndOddHeaders_val(self, value):
- if value in [None, False]:
- self._remove_evenAndOddHeaders()
- else:
- self.get_or_add_evenAndOddHeaders().val = value
diff --git a/spaces/cihyFjudo/fairness-paper-search/Ap Statistics Investigative Task Chapter 10 Alligators Answerszip A Real Study on Alligator Biology and Ecology.md b/spaces/cihyFjudo/fairness-paper-search/Ap Statistics Investigative Task Chapter 10 Alligators Answerszip A Real Study on Alligator Biology and Ecology.md
deleted file mode 100644
index 9d3c28bbe97d53d61e5acd1bec47715ed329a8be..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Ap Statistics Investigative Task Chapter 10 Alligators Answerszip A Real Study on Alligator Biology and Ecology.md
+++ /dev/null
@@ -1,6 +0,0 @@
- Ap Statistics Investigative Task Chapter 10 Alligators Answerszip
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Edius 6.5 Full Tek Link The Benefits of Using this Software for HDR Editing Grading and Delivery.md b/spaces/cihyFjudo/fairness-paper-search/Edius 6.5 Full Tek Link The Benefits of Using this Software for HDR Editing Grading and Delivery.md
deleted file mode 100644
index 3d096c9f511d2b9892aaea1b713bcc4bd4106d9a..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Edius 6.5 Full Tek Link The Benefits of Using this Software for HDR Editing Grading and Delivery.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
- View High Dynamic Range video the way it was meant to be seen. The amazing new MacBook Pro display delivers up to 7.7 million pixels and 1,000 nits of sustained brightness for HDR editing, grading and delivery with Final Cut Pro. Or connect the gorgeous Pro Display XDR to your Mac for a full 6K HDR playback and finishing solution. aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Kanchana 2 Movie Download Tamilrockers 176 Dont Miss the Terrifying and Amusing Film.md b/spaces/cihyFjudo/fairness-paper-search/Kanchana 2 Movie Download Tamilrockers 176 Dont Miss the Terrifying and Amusing Film.md
deleted file mode 100644
index 9782ef711da1e09b51c73e89a7805173446d2328..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Kanchana 2 Movie Download Tamilrockers 176 Dont Miss the Terrifying and Amusing Film.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc_pred.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc_pred.h
deleted file mode 100644
index aa305f45a5dc38530319f928e30bda6b715fcff3..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacenc_pred.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * AAC encoder main-type prediction
- * Copyright (C) 2015 Rostislav Pehlivanov
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * AAC encoder main-type prediction
- * @author Rostislav Pehlivanov ( atomnuker gmail com )
- */
-
-#ifndef AVCODEC_AACENC_PRED_H
-#define AVCODEC_AACENC_PRED_H
-
-#include "aacenc.h"
-
-/* Every predictor group needs to get reset at least once in this many frames */
-#define PRED_RESET_FRAME_MIN 240
-
-/* Any frame with less than this amount of frames since last reset is ok */
-#define PRED_RESET_MIN 64
-
-/* Raise to filter any low frequency artifacts due to prediction */
-#define PRED_SFB_START 10
-
-void ff_aac_apply_main_pred(AACEncContext *s, SingleChannelElement *sce);
-void ff_aac_adjust_common_pred(AACEncContext *s, ChannelElement *cpe);
-void ff_aac_search_for_pred(AACEncContext *s, SingleChannelElement *sce);
-void ff_aac_encode_main_pred(AACEncContext *s, SingleChannelElement *sce);
-
-#endif /* AVCODEC_AACENC_PRED_H */
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aandcttab.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aandcttab.h
deleted file mode 100644
index b0a2f44ecd5c032acedcf6054de49c2e88f73062..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aandcttab.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * AAN (Arai, Agui and Nakajima) (I)DCT tables
- */
-
-#ifndef AVCODEC_AANDCTTAB_H
-#define AVCODEC_AANDCTTAB_H
-
-#include <stdint.h>
-
-extern const uint16_t ff_aanscales[64];
-extern const uint16_t ff_inv_aanscales[64];
-
-#endif /* AVCODEC_AANDCTTAB_H */
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/blockdsp_init_arm.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/blockdsp_init_arm.c
deleted file mode 100644
index 2080d5253f9e74cd97240dc510e12ba8c9accbaa..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/blockdsp_init_arm.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * ARM optimized block operations
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/attributes.h"
-#include "libavutil/cpu.h"
-#include "libavutil/arm/cpu.h"
-#include "libavcodec/blockdsp.h"
-#include "blockdsp_arm.h"
-
-av_cold void ff_blockdsp_init_arm(BlockDSPContext *c)
-{
- int cpu_flags = av_get_cpu_flags();
-
- if (have_neon(cpu_flags))
- ff_blockdsp_init_neon(c);
-}
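The init function above shows FFmpeg's usual runtime-dispatch pattern: query the CPU flags once, then overwrite default function pointers with optimized routines the hardware actually supports. Below is a minimal sketch of the same idea in Python; every name in it is a hypothetical illustration, not an FFmpeg API.

```python
# Sketch of runtime CPU-feature dispatch, mirroring ff_blockdsp_init_arm.
# All names here are illustrative stand-ins, not FFmpeg functions.

def clear_block_generic(block):
    """Portable fallback: zero a block of coefficients one element at a time."""
    for i in range(len(block)):
        block[i] = 0

def clear_block_neon(block):
    """Stand-in for a SIMD-optimized version (here a bulk slice assignment)."""
    block[:] = [0] * len(block)

def have_neon(cpu_flags):
    """Pretend capability check (FFmpeg uses av_get_cpu_flags + have_neon)."""
    return "neon" in cpu_flags

class BlockDSPContext:
    def __init__(self):
        self.clear_block = clear_block_generic  # safe default

def blockdsp_init(ctx, cpu_flags):
    # Same shape as the C code: install faster routines only when supported.
    if have_neon(cpu_flags):
        ctx.clear_block = clear_block_neon

ctx = BlockDSPContext()
blockdsp_init(ctx, {"neon"})
block = [1] * 64
ctx.clear_block(block)  # dispatches to the NEON stand-in
```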
diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdata.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdata.h
deleted file mode 100644
index 31191a84759eeefb0cfb5861be277e08c6acc62f..0000000000000000000000000000000000000000
--- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/dvdata.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_DVDATA_H
-#define AVCODEC_DVDATA_H
-
-#include <stdint.h>
-
-extern const uint8_t ff_dv_zigzag248_direct[64];
-
-extern const uint8_t ff_dv_quant_shifts[22][4];
-extern const uint8_t ff_dv_quant_offset[4];
-
-#define NB_DV_VLC 409
-/* The number of entries with value zero in ff_dv_vlc_level. */
-#define NB_DV_ZERO_LEVEL_ENTRIES 72
-
-extern const uint8_t ff_dv_vlc_len[NB_DV_VLC];
-extern const uint8_t ff_dv_vlc_run[NB_DV_VLC];
-extern const uint8_t ff_dv_vlc_level[NB_DV_VLC];
-
-#endif /* AVCODEC_DVDATA_H */
diff --git a/spaces/coledie/Fashion_VAE/model.py b/spaces/coledie/Fashion_VAE/model.py
deleted file mode 100644
index fd9329c4268078b595af4ea978976fda19897ef5..0000000000000000000000000000000000000000
--- a/spaces/coledie/Fashion_VAE/model.py
+++ /dev/null
@@ -1,137 +0,0 @@
-"""MNIST digit classificatin."""
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-import torch.nn as nn
-import torchvision.datasets
-import torch.nn.functional as F
-from torchvision import transforms
-
-
-class Encoder(nn.Module):
- def __init__(self, image_dim, latent_dim):
- super().__init__()
- self.image_dim = image_dim
- self.latent_dim = latent_dim
- self.cnn = nn.Sequential(
- nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, padding=2),
- nn.MaxPool2d(kernel_size=2),
- nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
- nn.MaxPool2d(kernel_size=2),
- nn.Flatten(1, -1),
- )
- self.l_mu = nn.Linear(1568, np.prod(self.latent_dim))
- self.l_sigma = nn.Linear(1568, np.prod(self.latent_dim))
-
- def forward(self, x):
- x = x.reshape((-1, 1, *self.image_dim))
- x = self.cnn(x)
- mu = self.l_mu(x)
- sigma = self.l_sigma(x)
- return mu, sigma
-
-
-class Decoder(nn.Module):
- def __init__(self, image_dim, latent_dim):
- super().__init__()
- self.image_dim = image_dim
- self.latent_dim = latent_dim
- self.cnn = nn.Sequential(
- nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, padding=2),
- nn.MaxPool2d(kernel_size=2),
- nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=2),
- nn.MaxPool2d(kernel_size=2),
- nn.Flatten(1, -1),
- nn.Linear(288, np.prod(self.image_dim)),
- nn.Sigmoid(),
- )
-
- def forward(self, c):
- c = c.reshape((-1, 1, *self.latent_dim))
- x = self.cnn(c)
- return x
-
-
-class VAE(nn.Module):
- def __init__(self, image_dim=(28, 28), latent_dim=(14, 14)):
- super().__init__()
- self.image_dim = image_dim
- self.encoder = Encoder(image_dim, latent_dim)
- self.decoder = Decoder(image_dim, latent_dim)
-
- def forward(self, x):
- x = x.reshape((-1, 1, *self.image_dim))
- # The encoder's second head is treated as log(sigma^2) by the KL term in
- # the training loop below, so derive the std from it before sampling.
- mu, logvar = self.encoder(x)
- std = torch.exp(0.5 * logvar)
- c = mu + std * torch.randn_like(std)
- xhat = self.decoder(c)
- return xhat, mu, logvar
-
-
-if __name__ == '__main__':
- N_EPOCHS = 100
- LEARNING_RATE = .001
-
- model = VAE().cuda()
- optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
- loss_fn = torch.nn.MSELoss()
-
- dataset_base = torchvision.datasets.FashionMNIST("MNIST", download=True, transform=transforms.ToTensor())
-
- dataset_base_2 = torchvision.datasets.MNIST("MNIST", download=True, transform=transforms.ToTensor())
- dataset_base = torch.utils.data.ConcatDataset([dataset_base, dataset_base_2])
-
- dataset_train, dataset_test = torch.utils.data.random_split(
- dataset_base, (int(.8 * len(dataset_base)), int(.2 * len(dataset_base)))
- )
-
- model.train()
- dataloader = torch.utils.data.DataLoader(dataset_train,
- batch_size=512,
- shuffle=True,
- num_workers=0)
- i = 0
- for epoch in range(N_EPOCHS):
- total_loss = 0
- for x, label in dataloader:
- #for j in range(512):
- # plt.imsave(f"{i}-{label[j]}.jpg", np.stack([x[j].reshape((28, 28)).detach().numpy()] * 3, -1), cmap='gray')
- # i += 1
- #exit()
- x = x.cuda()
- label = label.cuda()
- optimizer.zero_grad()
- xhat, mu, logvar = model(x)
-
- BCE = F.binary_cross_entropy(xhat, x.reshape(xhat.shape), reduction='mean')
- # https://arxiv.org/abs/1312.6114
- # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
- KLD = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
- loss = BCE + KLD
- loss.backward()
- optimizer.step()
- total_loss += loss.item()
- print(f"{epoch}: {total_loss:.4f}")
-
- model.cpu()
- with open("vae.pt", "wb") as file:
- torch.save(model, file)
- model.eval()
- dataloader = torch.utils.data.DataLoader(dataset_test,
- batch_size=512,
- shuffle=True,
- num_workers=0)
- n_correct = 0
-
- COLS = 4
- ROWS = 4
- fig, axes = plt.subplots(ncols=COLS, nrows=ROWS, figsize=(5.5, 3.5),
- constrained_layout=True)
-
- dataloader_gen = iter(dataloader)
- x, label = next(dataloader_gen)
- xhat, mu, logvar = model(x)
- xhat = xhat.reshape((-1, 28, 28))
- for row in range(ROWS):
- for col in range(COLS):
- axes[row, col].imshow(xhat[row * COLS + col].detach().numpy(), cmap="gray")
- plt.show()
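The training loop above uses the standard VAE objective from Kingma & Welling (the inline comment cites arXiv:1312.6114): a reconstruction term plus the closed-form KL divergence between the approximate posterior N(mu, sigma^2) and a standard normal, with the encoder's second head interpreted as the log-variance (that is what `logvar.exp()` implies). A self-contained sketch of those two pieces, under that same assumption:

```python
# Sketch of the reparameterization trick and the ELBO terms used above.
# Assumes the encoder's second output is log(sigma^2), as the KL term implies.
import torch
import torch.nn.functional as F

def reparameterize(mu, logvar):
    """Sample z = mu + sigma * eps with sigma = exp(0.5 * logvar)."""
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + std * eps

def vae_loss(xhat, x, mu, logvar):
    """Reconstruction (BCE) + KL(N(mu, sigma^2) || N(0, I)), both mean-reduced."""
    bce = F.binary_cross_entropy(xhat, x.reshape(xhat.shape), reduction="mean")
    # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), negated and averaged
    kld = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld

# Toy check with tensors shaped like the model's flattened outputs.
mu = torch.zeros(4, 196)
logvar = torch.zeros(4, 196)
z = reparameterize(mu, logvar)
x = torch.rand(4, 784)
xhat = torch.rand(4, 784)
print(vae_loss(xhat, x, mu, logvar).item())
```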
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Enjoy the most realistic driving physics endless customization and huge open world with Ultimate Car Driving Simulator MOD APK (VIP Unlocked Unlimited Money).md b/spaces/congsaPfin/Manga-OCR/logs/Enjoy the most realistic driving physics endless customization and huge open world with Ultimate Car Driving Simulator MOD APK (VIP Unlocked Unlimited Money).md
deleted file mode 100644
index d23da2e43fba31bdc3c4be8dfd93024762fc1419..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Enjoy the most realistic driving physics endless customization and huge open world with Ultimate Car Driving Simulator MOD APK (VIP Unlocked Unlimited Money).md
+++ /dev/null
@@ -1,131 +0,0 @@
-
-
Ultimate Car Driving Simulator Mod APK: VIP Unlocked, Unlimited Money
-
If you are a fan of racing games and want to experience the thrill of driving realistic cars on various terrains, then you should try Ultimate Car Driving Simulator. This game is one of the best car driving simulators on Android, with a huge open world map, realistic physics and graphics, and customizable cars and garage. However, if you want to enjoy the game to the fullest, you might need some extra resources and features that are not available in the original version. That's why we recommend you to download Ultimate Car Driving Simulator Mod APK, which gives you VIP unlocked, unlimited money, and more. In this article, we will tell you everything you need to know about this mod apk, including its features, pros and cons, and how to download and install it on your device.
-
ultimate car driving simulator mod apk vip unlocked unlimited money
Ultimate Car Driving Simulator is a racing game developed by Sir Studios, which has over 100 million downloads on Google Play. The game lets you drive various types of cars, from sports cars to off-road vehicles, on a huge open world map with different locations and environments. You can customize your car's appearance and performance, upgrade and repair it in your garage, and explore the map at your own pace. You can also choose from different game modes, such as free roam, traffic mode, checkpoint mode, or drift mode.
-
However, some features and resources in the game are limited or require real money to unlock. For example, you need to watch ads or pay money to unlock some cars or get more money. You also need to earn VIP points to access some exclusive features, such as removing ads, unlocking all cars, getting double rewards, and more. That's why some players prefer to use Ultimate Car Driving Simulator Mod APK, which is a modified version of the game that gives you VIP unlocked, unlimited money, and more. With this mod apk, you can enjoy the game without any restrictions or limitations.
-
To download and install Ultimate Car Driving Simulator Mod APK on your device, you need to follow these simple steps:
-
* ultimate car simulator modded apk with vip features and unlimited cash
-* download ultimate car driving simulation mod apk free vip and money
-* ultimate car driving simulator hack apk vip access and infinite money
-* ultimate car driving simulator premium mod apk unlimited coins and vip
-* ultimate car driving simulator cracked apk with vip unlocked and money
-* ultimate car simulator mod apk free download vip and unlimited money
-* ultimate car driving simulator cheat apk with infinite money and vip
-* ultimate car driving simulator full mod apk unlimited money and vip
-* ultimate car driving simulator latest mod apk vip and money
-* ultimate car driving simulator pro mod apk with vip features and money
-* ultimate car simulator hacked apk free vip and unlimited money
-* ultimate car driving simulator unlocked mod apk with money and vip
-* ultimate car driving simulator modded game apk vip and unlimited money
-* ultimate car driving simulator 2020 mod apk with vip and money
-* ultimate car driving simulator best mod apk unlimited money and vip
-* ultimate car simulator cheat game apk with vip features and money
-* ultimate car driving simulator unlimited cash and vip mod apk
-* ultimate car driving simulator free premium and money mod apk
-* ultimate car driving simulator infinite coins and vip mod apk
-* ultimate car driving simulator 2021 mod apk with money and vip
-* ultimate car simulator cracked game apk with unlimited money and vip
-* ultimate car driving simulator hack game apk with money and vip
-* ultimate car driving simulator mod version apk with vip and money
-* ultimate car driving simulator no ads and money mod apk
-* ultimate car driving simulator all cars unlocked and money mod apk
-* ultimate car driving simulator realistic physics and money mod apk
-* ultimate car driving simulator open world and money mod apk
-* ultimate car driving simulator custom cars and money mod apk
-* ultimate car driving simulator high graphics and money mod apk
-* ultimate car driving simulator offline mode and money mod apk
-* ultimate car simulator premium version mod apk with money and vip
-* ultimate car driving simulator latest version mod apk with vip and money
-* ultimate car driving simulator new update mod apk with money and vip
-* ultimate car driving simulator online multiplayer mod apk with vip and money
-* ultimate car driving simulator fun gameplay mod apk with money and vip
-* ultimate car simulator full version mod apk with unlimited money and vip
-* ultimate car driving simulator easy controls and money mod apk
-* ultimate car driving simulator realistic sounds and money mod apk
-* ultimate car driving simulator fast cars and money mod apk
-* ultimate car driving simulator racing mode and money mod apk
-
-
Click on this link to download the mod apk file.
-
Go to your device's settings and enable installation from unknown sources.
-
Locate the downloaded file in your file manager and tap on it to install it.
-
Launch the game and enjoy VIP unlocked, unlimited money, and more.
-
-
Features of Ultimate Car Driving Simulator Mod APK
-
VIP Unlocked
-
One of the main features of Ultimate Car Driving Simulator Mod APK is that it gives you VIP unlocked. This means that you can access all the benefits of being a VIP without spending any money or earning any VIP points. Some of these benefits are:
-
-
All cars unlocked: You can drive any car you want without having to unlock it with money or ads.
-
Double rewards: You can earn twice as much money and VIP points from completing missions and challenges.
-
Exclusive cars: You can access some special cars that are only available for VIPs, such as the Bugatti Chiron, the Lamborghini Veneno, or the Ferrari LaFerrari.
-
-
To activate the VIP mode, you just need to tap on the VIP icon on the top right corner of the screen. You will see a green check mark indicating that you are a VIP. You can also deactivate the VIP mode anytime you want by tapping on the icon again.
-
Unlimited Money
-
Another feature of Ultimate Car Driving Simulator Mod APK is that it gives you unlimited money. This means that you can buy anything you want in the game without worrying about running out of money. You can use your money to:
-
-
Buy new cars: You can choose from over 80 different cars, from classic cars to supercars, from muscle cars to off-road vehicles.
-
Customize your cars: You can change your car's color, paint, wheels, spoilers, exhausts, and more.
-
Upgrade your cars: You can improve your car's speed, acceleration, handling, braking, and more.
-
Repair your cars: You can fix any damage or wear on your car's body or engine.
-
-
To get unlimited money in the game, you just need to play normally and earn money from completing missions and challenges. You will see that your money will never decrease, no matter how much you spend. You can also use the double rewards feature to get more money faster.
-
Realistic Physics and Graphics
-
Ultimate Car Driving Simulator Mod APK also enhances the realistic physics and graphics of the game. This means that you can enjoy a more immersive and realistic car driving experience on your device. Some of the aspects that are improved are:
-
-
Physics: The game simulates real car physics, such as gravity, inertia, friction, collision, suspension, and more. You can feel the difference between driving on asphalt, dirt, grass, or snow. You can also perform realistic stunts, such as drifting, jumping, or crashing.
-
Graphics: The game offers high-quality graphics options and settings, such as resolution, texture quality, shadow quality, anti-aliasing, and more. You can adjust them according to your device's performance and preference. You can also enjoy realistic lighting effects, such as day and night cycle, sun rays, reflections, or lens flare.
-
-
Huge Open World Map
-
Ultimate Car Driving Simulator Mod APK also expands the huge open world map of the game. This means that you can explore and discover more places and environments in the game. Some of the features of the map are:
-
-
Size: The map is one of the biggest and most detailed maps in a car driving simulator game. It covers over 16 square kilometers of land, with different terrains, such as city, desert, forest, mountain, and more.
-
Locations: The map has various locations and landmarks that you can visit and interact with, such as airports, bridges, tunnels, gas stations, ramps, loops, and more.
-
Challenges: The map also has various challenges and missions that you can complete and earn rewards, such as racing, drifting, jumping, or smashing.
-
-
To explore and discover the huge open world map of the game, you just need to drive around and follow the signs and markers on the map. You can also use the mini-map on the top left corner of the screen to navigate and find your destination. You can also switch between different camera views by tapping on the camera icon on the bottom right corner of the screen.
-
Customizable Cars and Garage
-
Ultimate Car Driving Simulator Mod APK also enhances the customizable cars and garage of the game. This means that you can personalize your car's appearance and performance according to your taste and style. Some of the features of the cars and garage are:
-
-
Cars: The game has over 80 different cars that you can drive and customize, from classic cars to supercars, from muscle cars to off-road vehicles. Each car has its own characteristics and stats, such as speed, acceleration, handling, braking, and more.
-
Customization: The game allows you to customize your car's appearance and performance in various ways. You can change your car's color, paint, wheels, spoilers, exhausts, and more. You can also improve your car's speed, acceleration, handling, braking, and more.
-
Garage: The game also has a garage where you can store, upgrade, and repair your cars. You can access your garage by tapping on the garage icon on the bottom left corner of the screen. You can also visit other players' garages and see their cars.
-
-
Pros and Cons of Ultimate Car Driving Simulator Mod APK
-
Ultimate Car Driving Simulator Mod APK is a great way to enjoy the game without any limitations or restrictions. However, it also has some drawbacks and risks that you should be aware of before using it. Here are some of the pros and cons of Ultimate Car Driving Simulator Mod APK:
-
Pros
-
Some of the advantages of using Ultimate Car Driving Simulator Mod APK are:
-
-
You can access all the features and resources of the game for free, such as VIP unlocked, unlimited money, all cars unlocked, and more.
-
You can customize your car's appearance and performance in any way you want, without worrying about money or ads.
-
You can enjoy a realistic and immersive car driving experience, with realistic physics and graphics, and a huge open world map.
-
You can have fun and challenge yourself with various game modes, challenges, and missions.
-
You can share your cars and garage with other players and see their cars.
-
-
Cons
-
Some of the disadvantages or risks of using Ultimate Car Driving Simulator Mod APK are:
-
-
You might face some compatibility or performance issues on your device, depending on your device's specifications and settings.
-
You might encounter some bugs or glitches in the game, such as crashes, freezes, or errors.
-
You might lose your progress or data if you uninstall the mod apk or update the game.
-
You might get banned or suspended from the game if you use the mod apk online or in multiplayer mode.
-
You might violate the terms and conditions of the game or the Google Play Store by using the mod apk.
-
-
Conclusion
-
Ultimate Car Driving Simulator is one of the best car driving simulators on Android, with a huge open world map, realistic physics and graphics, and customizable cars and garage. However, if you want to enjoy the game to the fullest, you might need some extra resources and features that are not available in the original version. That's why we recommend you to download Ultimate Car Driving Simulator Mod APK, which gives you VIP unlocked, unlimited money, and more. With this mod apk, you can enjoy the game without any restrictions or limitations. However, you should also be careful of the drawbacks and risks of using the mod apk, such as compatibility issues, bugs, data loss, or bans. Therefore, you should use the mod apk at your own risk and discretion.
-
We hope this article has helped you to learn more about Ultimate Car Driving Simulator Mod APK and how to download and install it on your device. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!
-
FAQs
-
Here are some of the frequently asked questions and answers about Ultimate Car Driving Simulator Mod APK:
-
-
Is Ultimate Car Driving Simulator Mod APK safe to use? Ultimate Car Driving Simulator Mod APK is generally safe to use, as long as you download it from a trusted source and scan it with an antivirus before installing it. However, you should also be aware of the potential risks of using a mod apk, such as compatibility issues, bugs, data loss, or bans. Therefore, you should use the mod apk at your own risk and discretion.
-
Can I play Ultimate Car Driving Simulator Mod APK online or with other players? Ultimate Car Driving Simulator Mod APK is mainly designed for offline or single-player mode. However, you can also play online or with other players in some game modes, such as traffic mode or checkpoint mode. However, you should be careful not to use the mod apk online or in multiplayer mode too often or too blatantly, as you might get banned or suspended from the game for cheating or hacking.
-
What are some alternatives to Ultimate Car Driving Simulator Mod APK? If you are looking for some other car driving simulator games that you can play on your Android device, you might want to check out these alternatives:
-
-
Real Driving Sim: This game is another realistic car driving simulator game, with over 80 cars, 20 cities, and various game modes. You can also customize your car's appearance and performance, and enjoy realistic physics and graphics.
-
Extreme Car Driving Simulator: This game is a fun and exciting car driving simulator game, with over 25 cars, 3 maps, and various game modes. You can also perform stunts, drifts, and crashes, and enjoy realistic physics and graphics.
-
Car Simulator 2: This game is a multiplayer car driving simulator game, with over 30 cars, 3 maps, and various game modes. You can also interact with other players, join races and clubs, and enjoy realistic physics and graphics.
-
-
How can I contact the developers of Ultimate Car Driving Simulator Mod APK? If you have any questions, feedback, or issues regarding Ultimate Car Driving Simulator Mod APK, you can contact the developers of the mod apk by visiting their website or sending them an email at . You can also follow them on their social media accounts to get the latest news and updates about the mod apk.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Get Geometry Dash Meltdown Mod APK for Free on Android Devices.md b/spaces/congsaPfin/Manga-OCR/logs/How to Get Geometry Dash Meltdown Mod APK for Free on Android Devices.md
deleted file mode 100644
index 193b4b40171f31529877fc7fb77fb15a63e4f815..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/How to Get Geometry Dash Meltdown Mod APK for Free on Android Devices.md
+++ /dev/null
@@ -1,85 +0,0 @@
-
-
Geometry Dash Meltdown Mod APK: A Fun and Challenging Platformer Game
-
If you are looking for a game that can test your reflexes, skills, and patience, then you should try Geometry Dash Meltdown. This is a platformer game that will make you jump, fly, and flip your way through various levels filled with spikes, fire, monsters, and other obstacles. You will need to time your moves perfectly to avoid crashing and losing your progress. Geometry Dash Meltdown is a game that will keep you entertained and challenged for hours.
Geometry Dash Meltdown is a spin-off of the popular Geometry Dash series by RobTop Games. It was released in 2015 as a free game for Android and iOS devices. The game features three exclusive levels: The Seven Seas, Viking Arena, and Airborne Robots. Each level has its own theme, music, and difficulty. You can also create your own levels using the level editor and share them with other players online.
-
Features of Geometry Dash Meltdown
-
Unique and colorful graphics
-
One of the most appealing aspects of Geometry Dash Meltdown is its graphics. The game has a vibrant and neon-like style that suits the fast-paced and energetic gameplay. The backgrounds, obstacles, and characters are all designed with geometric shapes and patterns that create a visually stunning effect. You will never get bored of the game's aesthetics as they change with every level.
-
Dynamic and rhythmic music
-
Another feature that makes Geometry Dash Meltdown stand out is its music. The game has an original soundtrack composed by F-777, a famous electronic music producer. The music matches the mood and tempo of each level, creating an immersive and exhilarating experience. You will feel the adrenaline rush as you jump and dash to the beat of the music.
-
Simple and intuitive controls
-
The game has very simple and intuitive controls that anyone can learn in seconds. You only need to tap the screen to make your character jump, fly, or flip. You can also hold the screen to perform continuous jumps or flights. The game does not require any complicated gestures or buttons to play. However, do not let the simplicity fool you. The game is very hard to master as it requires precise timing and coordination.
-
Various levels and obstacles
-
The game has three official levels that you can play: The Seven Seas, Viking Arena, and Airborne Robots. Each level has its own theme, music, and difficulty. You will face different obstacles such as spikes, fireballs, saws, lasers, monsters, portals, gravity switches, and more. You will need to overcome these challenges to reach the end of each level. You can also play user-generated levels that are created by other players using the level editor.
-
Customizable characters and icons
-
The game allows you to customize your character and icon with different colors, shapes, faces, trails, and effects. You can unlock these options by collecting stars, coins, diamonds, keys, shards, orbs, and other items in the game. You can also use these items to unlock new game modes such as practice mode, gauntlets mode, map packs mode, quests mode, daily levels mode, weekly demon mode, hall of fame mode, vaults mode, shops mode, secret rooms mode and more.
-
free geometry dash meltdown apk mod unlocked
-download geometry dash meltdown mod apk latest version
-geometry dash meltdown hack mod apk free download
-free download geometry dash meltdown full version mod apk
-geometry dash meltdown mod apk download for android
-how to download geometry dash meltdown mod apk for free
-geometry dash meltdown unlimited coins mod apk free download
-free download geometry dash meltdown premium mod apk
-geometry dash meltdown mod apk no ads free download
-download geometry dash meltdown mod apk all levels unlocked
-geometry dash meltdown mega mod apk free download
-free download geometry dash meltdown hacked apk mod
-geometry dash meltdown mod apk offline free download
-download geometry dash meltdown mod apk unlimited money
-geometry dash meltdown pro mod apk free download
-free download geometry dash meltdown cheat mod apk
-geometry dash meltdown mod apk android 1 free download
-download geometry dash meltdown mod apk revdl
-geometry dash meltdown vip mod apk free download
-free download geometry dash meltdown cracked mod apk
-geometry dash meltdown mod apk 2.2.11 free download
-download geometry dash meltdown mod apk rexdl
-geometry dash meltdown plus mod apk free download
-free download geometry dash meltdown extreme mod apk
-geometry dash meltdown mod apk 2023 free download
-
Leaderboards and achievements
-
The game has online leaderboards where you can compare your scores and rankings with other players from around the world. You can also earn achievements by completing various tasks and challenges in the game. You can view your stats and progress in the game menu. You can also sync your data with your Google Play or Game Center account to save your progress and access it on different devices.
-
Why download Geometry Dash Meltdown Mod APK?
-
Geometry Dash Meltdown is a fun and challenging game that will keep you hooked for hours. However, it can also be frustrating and annoying at times. You may encounter ads and pop-ups that interrupt your gameplay. You may also find some levels and features locked or restricted unless you pay real money or watch videos. You may also run out of lives or energy that limit your playtime.
-
That is why you should download Geometry Dash Meltdown Mod APK. This is a modified version of the game that gives you access to all the levels and features for free. You can also enjoy the game without any ads or distractions. You can also play the game as much as you want without worrying about lives or energy. You can have unlimited fun and challenge with Geometry Dash Meltdown Mod APK.
-
How to download and install Geometry Dash Meltdown Mod APK?
-
Step-by-step guide
-
If you want to download and install Geometry Dash Meltdown Mod APK, you can follow these simple steps:
-
-
Go to a trusted and reliable website that offers Geometry Dash Meltdown Mod APK. You can search for it on Google or use the link provided below.
-
Download the APK file to your device. Make sure you have enough storage space and a stable internet connection.
-
Before installing the APK file, you need to enable the "Unknown Sources" option on your device. This will allow you to install apps from sources other than the official app store. To do this, go to Settings > Security > Unknown Sources and toggle it on.
-
Locate the downloaded APK file on your device using a file manager app or your browser's downloads folder. Tap on it to start the installation process.
-
Follow the instructions on the screen to complete the installation. It may take a few seconds or minutes depending on your device's performance.
-
Once the installation is done, you can launch the game from your app drawer or home screen. Enjoy playing Geometry Dash Meltdown Mod APK!
-
-
Tips and tricks for playing Geometry Dash Meltdown
-
If you want to improve your skills and performance in Geometry Dash Meltdown, you can use these tips and tricks:
-
-
Practice makes perfect. The game is very hard and requires a lot of trial and error. You will need to play each level multiple times to memorize the patterns and timings of the obstacles. You can also use the practice mode to practice each level without losing lives or progress.
-
Use headphones. The game's music is not only catchy but also helpful. The music syncs with the gameplay and gives you cues on when to jump or dash. Using headphones will help you hear the music better and focus on the game.
-
Customize your character and icon. The game allows you to change the appearance of your character and icon with different colors, shapes, faces, trails, and effects. You can use these options to make your character and icon more visible and distinctive on the screen. This will help you avoid confusion and mistakes.
-
Watch videos of other players. You can learn a lot from watching how other players play each level. You can see how they avoid obstacles, collect items, and complete challenges. You can also get inspired by their creativity and style. You can watch videos of other players on YouTube or in the game's community section.
-
Have fun and don't give up. The game is meant to be fun and challenging, not frustrating and stressful. Don't let the game get to you if you fail or lose. Instead, try again and learn from your mistakes. Remember that every failure is an opportunity to improve and succeed.
-
-
Conclusion
-
Geometry Dash Meltdown is a platformer game that will test your reflexes, skills, and patience. It has unique and colorful graphics, dynamic and rhythmic music, simple and intuitive controls, various levels and obstacles, customizable characters and icons, leaderboards and achievements, and more. You can also download Geometry Dash Meltdown Mod APK to unlock all the levels and features, remove ads and distractions, and enjoy unlimited fun and challenge. You can download and install the game easily by following the step-by-step guide provided in this article. You can also use the tips and tricks to improve your skills and performance in the game. Geometry Dash Meltdown is a game that will make you addicted and satisfied.
-
FAQs
-
Here are some frequently asked questions about Geometry Dash Meltdown and Geometry Dash Meltdown Mod APK:
-
-
What is the difference between Geometry Dash Meltdown and Geometry Dash?
-
Geometry Dash Meltdown is a spin-off of Geometry Dash, a popular platformer game by RobTop Games. Geometry Dash Meltdown has three exclusive levels that are not available in Geometry Dash. Geometry Dash Meltdown is also free to play, while Geometry Dash costs $1.99 to download.
-
Is Geometry Dash Meltdown Mod APK safe to use?
-
Yes, Geometry Dash Meltdown Mod APK is safe to use as long as you download it from a trusted and reliable website. You should also scan the APK file with an antivirus app before installing it on your device. However, you should be aware that using modded apps may violate the terms and conditions of the original game and may result in bans or penalties.
-
How can I update Geometry Dash Meltdown Mod APK?
-
To update Geometry Dash Meltdown Mod APK, you need to download the latest version of the modded app from the same website where you downloaded the previous version. You can also check for updates on the website regularly or subscribe to their notifications. You should also uninstall the old version of the modded app before installing the new one.
-
How can I backup my progress in Geometry Dash Meltdown?
-
To backup your progress in Geometry Dash Meltdown, you need to create an account in the game and sync your data with your Google Play or Game Center account. You can also use cloud backup services such as Google Drive or Dropbox to save your data. You should backup your progress regularly to avoid losing it in case of data corruption or device loss.
-
How can I contact the developers of Geometry Dash Meltdown?
-
To contact the developers of Geometry Dash Meltdown, you can visit their official website at https://www.robtopgames.com/ or follow them on their social media accounts such as Facebook, Twitter, Instagram, YouTube, and Reddit. You can also email them at support@robtopgames.com or leave a comment or review on their app store page.
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Leaf Green version game APK - The Most Authentic Pokemon Emulator for Android.md b/spaces/congsaPfin/Manga-OCR/logs/Leaf Green version game APK - The Most Authentic Pokemon Emulator for Android.md
deleted file mode 100644
index aba0d4138c1a430cf8e5bf247e30a0a2391e95aa..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Leaf Green version game APK - The Most Authentic Pokemon Emulator for Android.md
+++ /dev/null
@@ -1,142 +0,0 @@
-
-
How to Download Leaf Green APK for Android
-
If you are a fan of Pokemon games, you might have heard of Leaf Green APK, a remake of the classic Pokemon game that was released for the Game Boy Advance in 2004. Leaf Green APK is an Android app that lets you play this nostalgic game on your smartphone or tablet. In this article, we will show you what Leaf Green APK is, how to download and install it on your Android device, and how to play it on your PC or Mac using an Android emulator.
Leaf Green APK is an Android app that contains the Pokemon Leaf Green game, which is a remake of the original Pokemon Red and Green games that were released for the Game Boy in 1996. The game follows the same plot and gameplay as the original games, but with enhanced graphics, smoother game-play, and new features introduced in Generation III of Pokemon games.
-
A remake of the classic Pokemon game
-
In Leaf Green APK, you play as a young Pokemon trainer who sets out on a journey across the Kanto region to catch and train Pokemon, battle other trainers, and challenge the eight Gym Leaders and the Elite Four. Along the way, you will encounter Team Rocket, a criminal organization that wants to use Pokemon for evil purposes. You will also have a rival who will challenge you throughout your adventure.
-
Features and benefits of Leaf Green APK
-
Leaf Green APK has many features and benefits that make it a great choice for Pokemon fans. Some of them are:
-
-
You can choose from three starter Pokemon: Bulbasaur, Charmander, or Squirtle.
-
You can catch and train over 150 different Pokemon, including some from Generation II and III.
-
You can use items, abilities, moves, and strategies that were not available in the original games.
-
You can trade and battle with other players using the Game Link Cable or the Wireless Adapter.
-
You can access new areas and mini-games, such as the Sevii Islands and the Trainer Tower.
-
You can enjoy the nostalgic music and sound effects from the original games.
-
-
How to download and install Leaf Green APK on your Android device
-
If you want to play Leaf Green APK on your Android device, you will need to download and install it manually, as it is not available on the Google Play Store. Here are the requirements and compatibility, steps to download and install, and tips and tricks to enjoy Leaf Green APK on your Android device.
-
Requirements and compatibility
-
To download and install Leaf Green APK on your Android device, you will need:
-
-
An Android device running Android 4.0 or higher.
-
At least 28 MB of free storage space on your device or SD card.
-
A stable internet connection to download the APK file.
-
A file manager app to locate and install the APK file.
-
Allow installation of apps from unknown sources on your device settings.
-
-
Steps to download and install Leaf Green APK
-
To download and install Leaf Green APK on your Android device, follow these steps:
-
download pokemon leaf green apk for android
-download leaf green version game apk
-download leaf green gba emulator apk
-download leaf green apk mod
-download leaf green apk offline
-download leaf green apk latest version
-download leaf green apk no root
-download leaf green apk with cheats
-download leaf green apk english
-download leaf green apk full version
-download leaf green apk free fire
-download leaf green apk hack
-download leaf green apk unlimited money
-download leaf green apk 2023
-download leaf green apk update
-download leaf green apk obb
-download leaf green apk data
-download leaf green apk file
-download leaf green apk android 11
-download leaf green apk android 10
-download leaf green apk android 9
-download leaf green apk android 8
-download leaf green apk android 7
-download leaf green apk android 6
-download leaf green apk android 5
-download leaf green apk for pc
-download leaf green apk for ios
-download leaf green apk for windows 10
-download leaf green apk for windows 7
-download leaf green apk for mac
-download leaf green apk for laptop
-download leaf green apk for tablet
-download leaf green apk for chromebook
-download leaf green apk for firestick
-download leaf green apk for smart tv
-how to download leaf green apk on android
-how to download leaf green apk on pc
-how to download leaf green apk on ios
-how to download leaf green apk on windows 10
-how to download leaf green apk on windows 7
-how to download leaf green apk on mac
-how to download leaf green apk on laptop
-how to download leaf green apk on tablet
-how to download leaf green apk on chromebook
-how to download leaf green apk on firestick
-how to download leaf green apk on smart tv
-where to download leaf green apk for free
-where to download leaf green apk safely
-where to download leaf green apk without virus
-where to download leaf green apk without ads
-
-
Go to this link on your browser and tap on Download APK (28 MB) button.
-
Wait for the download to finish and locate the downloaded file using your file manager app.
-
Tap on the file and select Install when prompted.
-
Wait for the installation to complete and tap on Open to launch the app.
Tips and tricks to enjoy Leaf Green APK
-
Now that you have downloaded and installed Leaf Green APK on your Android device, you can start playing and enjoying this classic Pokemon game. Here are some tips and tricks to help you get the most out of Leaf Green APK:
-
-
Save your game frequently, especially before important battles or events.
-
Use the fast forward feature to speed up the game-play and skip boring parts.
-
Use the cheat codes to get unlimited items, money, or rare Pokemon.
-
Explore every area and talk to every NPC to find hidden secrets and rewards.
-
Use the online features to trade and battle with other players around the world.
-
-
How to play Leaf Green APK on your PC or Mac
-
If you want to play Leaf Green APK on a bigger screen and with better controls, you can also play it on your PC or Mac using an Android emulator. An Android emulator is a software that mimics the Android operating system on your computer, allowing you to run Android apps and games on your PC or Mac. In this section, we will show you what an Android emulator is, the best Android emulators for PC and Mac, and the steps to play Leaf Green APK on your PC or Mac using an Android emulator.
-
What is an Android emulator?
-
An Android emulator is a software that creates a virtual Android device on your computer, allowing you to run Android apps and games on your PC or Mac. An Android emulator can have different features and functions, such as:
-
-
Customizing the screen size, resolution, and orientation of the virtual device.
-
Mapping the keyboard and mouse to control the virtual device.
-
Accessing the Google Play Store and other app stores on the virtual device.
-
Recording and capturing the screen of the virtual device.
-
Sharing files and data between the virtual device and your computer.
-
-
The best Android emulators for PC and Mac
-
There are many Android emulators available for PC and Mac, but not all of them are compatible with Leaf Green APK. Some of them may have issues with performance, graphics, sound, or online features. Therefore, it is important to choose an Android emulator that works well with Leaf Green APK. Here are some of the best Android emulators for PC and Mac that we recommend:
-
-
Name
Description
Download Link
-
NoxPlayer
A fast and stable Android emulator that supports high-performance gaming and multiple instances.
Steps to play Leaf Green APK on your PC or Mac using an Android emulator
-
To play Leaf Green APK on your PC or Mac using an Android emulator, follow these steps:
-
-
Download and install an Android emulator of your choice from the links above.
-
Launch the Android emulator and sign in with your Google account.
-
Download Leaf Green APK from this link on your browser.
-
Drag and drop the downloaded file into the Android emulator window or use the file manager app to locate and install it.
-
Launch Leaf Green APK from the app drawer or home screen of the Android emulator.
-
Enjoy playing Leaf Green APK on your PC or Mac with better graphics and controls.
-
-
Conclusion
-
In conclusion, Leaf Green APK is an amazing app that lets you play one of the best Pokemon games ever made on your Android device or PC or Mac. It has all the features and benefits of the original game, plus some new ones that make it more enjoyable and convenient. If you want to download and install Leaf Green APK on your Android device or PC or Mac, you can follow the steps we have provided in this article. We hope you have fun playing Leaf Green APK and reliving your childhood memories of Pokemon.
FAQs
-
Here are some of the frequently asked questions about Leaf Green APK:
-
-
Q: Is Leaf Green APK safe and legal to use?
-A: Leaf Green APK is safe and virus-free to use, as long as you download it from a trusted source. However, it is not legal to use Leaf Green APK if you do not own the original game or have the permission of the game developer.
-
Q: How can I save my progress in Leaf Green APK?
-A: You can save your progress in Leaf Green APK by using the in-game save feature or the emulator's save state feature. The in-game save feature lets you save your game at any Pokemon Center or on the world map. The emulator's save state feature lets you save your game at any point and load it later.
-
Q: How can I transfer my save data from Leaf Green APK to another device?
-A: You can transfer your save data from Leaf Green APK to another device by copying the save file from your device's storage or SD card to the other device's storage or SD card. The save file is usually located in the folder named "LeafGreen" or "com.nintendo.pokemonleafgreen".
-
Q: How can I update Leaf Green APK to the latest version?
-A: You can update Leaf Green APK to the latest version by downloading and installing the new APK file from the same source you downloaded it from. However, you should back up your save data before updating, as some updates may cause compatibility issues or data loss.
-
Q: How can I contact the developer of Leaf Green APK?
-A: You can contact the developer of Leaf Green APK by visiting their website or social media pages. However, please note that Leaf Green APK is not an official app from Nintendo or Game Freak, and they may not respond to your queries or requests.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Solitaire Hack Mod APK How to Play the Classic Card Game with More Fun.md b/spaces/congsaPfin/Manga-OCR/logs/Solitaire Hack Mod APK How to Play the Classic Card Game with More Fun.md
deleted file mode 100644
index d0372a2891274fd87c57308f41777fe1be1c5fe9..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Solitaire Hack Mod APK How to Play the Classic Card Game with More Fun.md
+++ /dev/null
@@ -1,111 +0,0 @@
-
-
Solitaire Hack Mod APK: How to Play and Enjoy the Classic Card Game
-
Introduction
-
Solitaire is one of the most popular card games in the world. It is a game that can be played by anyone, anywhere, anytime. It is a game that challenges your mind, relaxes your mood, and entertains you for hours. But what if you want to spice up your solitaire experience with some extra features and options? That's where solitaire hack mod apk comes in.
Solitaire hack mod apk is a modified version of the classic solitaire game that you can download and install on your Android device. It offers you unlimited money, unlimited hints, unlimited undos, no ads, and more. It also lets you customize the appearance of the cards, the background, and the sound effects. With solitaire hack mod apk, you can enjoy the classic card game like never before.
-
How to download and install solitaire hack mod apk
-
Step 1: Find a reliable source for the apk file
-
The first thing you need to do is to find a trustworthy website that provides the solitaire hack mod apk file. You can use a search engine like Google or Bing to look for it, or you can use one of the links below:
Make sure you check the reviews and ratings of the website before downloading anything. Also, avoid clicking on any suspicious ads or pop-ups that may appear on the site.
-
Step 2: Enable unknown sources on your device
-
The next thing you need to do is to allow your device to install apps from unknown sources. This is because solitaire hack mod apk is not available on the official Google Play Store, so you need to enable this option to install it manually. To do this, follow these steps:
-
-
Go to your device's settings and look for security or privacy options.
-
Find the option that says "Unknown sources" or "Install unknown apps" and toggle it on.
-
A warning message may pop up, asking you to confirm your choice. Tap on "OK" or "Allow".
-
-
Now you are ready to install the solitaire hack mod apk file on your device.
-
solitaire mod apk unlimited money
-solitaire hack apk download
-solitaire mod apk no ads
-solitaire hack apk android
-solitaire mod apk latest version
-solitaire hack apk ios
-solitaire mod apk offline
-solitaire hack apk 2023
-solitaire mod apk premium
-solitaire hack apk free
-solitaire mod apk unlocked
-solitaire hack apk online
-solitaire mod apk pro
-solitaire hack apk unlimited coins
-solitaire mod apk revdl
-solitaire hack apk 2022
-solitaire mod apk happymod[^1^]
-solitaire hack apk rexdl
-solitaire mod apk vip
-solitaire hack apk no root
-solitaire mod apk ad free
-solitaire hack apk all unlocked
-solitaire mod apk full version
-solitaire hack apk cheat
-solitaire mod apk 10.6.0[^1^]
-solitaire hack apk 4.16.3141.1[^1^]
-solitaire mod apk microsoft[^1^]
-solitaire hack apk classic
-solitaire mod apk spider
-solitaire hack apk klondike
-solitaire mod apk tripeaks
-solitaire hack apk pyramid
-solitaire mod apk freecell
-solitaire hack apk daily challenges[^1^]
-solitaire mod apk unlimited hints
-solitaire hack apk unlimited stars[^1^]
-solitaire mod apk 2023 download
-solitaire hack apk 2022 download
-solitaire mod apk android 1
-solitaire hack apk android oyun club
-solitaire mod apk android republic
-solitaire hack apk an1.com
-solitaire mod apk apkpure
-solitaire hack apk apkmody.io[^1^]
-solitaire mod apk apkmirror[^1^]
-solitaire hack apk apknite.com[^1^]
-solitaire mod apk apksfree.com[^1^]
-solitaire hack apk apktada.com[^1^]
-
Step 3: Download and install the apk file
-
The final thing you need to do is to download and install the solitaire hack mod apk file on your device. To do this, follow these steps:
-
-
Go back to the website where you found the apk file and tap on the download button.
-
Wait for the download to finish and then locate the file in your device's storage.
-
Tap on the file and follow the instructions on the screen to install it.
-
Once the installation is complete, you can launch the app and start playing solitaire hack mod apk.
-
-
How to play solitaire hack mod apk
-
The rules of the game
-
- The rules of solitaire hack mod apk are the same as the original solitaire game. You have to arrange the cards in four piles, one for each suit, from ace to king. You can move cards from the tableau (the seven columns at the bottom) to the foundation (the four piles at the top) or within the tableau. You can only move cards that are face up and follow a descending order and an alternating color. You can also draw cards from the stock (the pile at the top left) and place them on the waste (the pile next to the stock) or on the tableau. You win the game when you complete all four piles in the foundation.
-
The tips and tricks for winning
-
Playing solitaire hack mod apk can be fun and easy, especially with the unlimited money, hints, and undos that you get. However, if you want to challenge yourself and improve your skills, here are some tips and tricks that you can use:
-
-
Plan ahead: Before you make a move, think about how it will affect the rest of the game. Try to avoid moves that will block your access to other cards or create empty spaces that you cannot fill.
-
Use the hints wisely: The hints can help you find possible moves that you may have missed, but they can also spoil the fun of the game. Use them sparingly and only when you are really stuck.
-
Use the undos carefully: The undos can help you undo a mistake or try a different strategy, but they can also make you lose track of your progress. Use them only when necessary and not as a way to cheat.
-
Focus on the aces: The aces are the most important cards in the game, as they start the foundation piles. Try to free them as soon as possible and move them to the foundation.
-
Build down in color: When moving cards within the tableau, try to build down in color, meaning that you place cards of the same suit on top of each other. This will make it easier to move them to the foundation later.
-
-
The benefits of playing solitaire
-
Solitaire is not only a fun and relaxing game, but also a beneficial one. Playing solitaire can help you improve your mental abilities, such as memory, concentration, logic, and problem-solving. It can also help you reduce stress, boredom, and anxiety. Solitaire is a game that you can play anytime, anywhere, and at your own pace. It is a game that can keep you entertained for hours and make you feel good.
-
Conclusion
-
Solitaire hack mod apk is a great way to enjoy the classic card game with some extra features and options. You can download and install it on your Android device easily and safely. You can play it with unlimited money, hints, undos, no ads, and more. You can also customize the appearance of the game according to your preferences. Solitaire hack mod apk is a game that can challenge your mind, relax your mood, and entertain you for hours.
-
If you are looking for a new and exciting way to play solitaire, then solitaire hack mod apk is the perfect choice for you. Download it now and start playing!
-
FAQs
-
-
Q: Is solitaire hack mod apk safe to use?
-
A: Yes, solitaire hack mod apk is safe to use as long as you download it from a reliable source and follow the instructions carefully. However, you should always be careful when installing apps from unknown sources and scan them for viruses or malware before using them.
-
Q: Is solitaire hack mod apk compatible with all Android devices?
-
A: Solitaire hack mod apk should work on most Android devices that run on Android 4.4 or higher. However, some devices may have compatibility issues or performance problems due to different specifications or settings.
-
Q: How do I update solitaire hack mod apk?
-
A: Solitaire hack mod apk does not have an automatic update feature, so you need to check for updates manually. You can do this by visiting the website where you downloaded the apk file and looking for new versions or updates. Alternatively, you can uninstall the app and install it again with the latest version.
-
Q: How do I uninstall solitaire hack mod apk?
-
A: Solitaire hack mod apk can be uninstalled like any other app on your device. Go to your device's settings, open the Apps (or Applications) section, find solitaire hack mod apk in the list of installed apps, tap it, then tap Uninstall (or Remove) and confirm your choice.
-
Q: How do I contact the developer of solitaire hack mod apk?
-
A: Solitaire hack mod apk is developed by Microsoft Corporation, the same company that created the original solitaire game. You can contact them by visiting their official website at https://www.microsoft.com/en-us/ or by sending them an email at solitaire@microsoft.com.
-
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/Hercules-Dwayne-Johnson-1080p-Torrent.md b/spaces/contluForse/HuggingGPT/Hercules-Dwayne-Johnson-1080p-Torrent.md
deleted file mode 100644
index 0e31fe207ba34f19b2d051cb7ba252cdf6943cd9..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/Hercules-Dwayne-Johnson-1080p-Torrent.md
+++ /dev/null
@@ -1,84 +0,0 @@
-## Hercules Dwayne Johnson 1080p Torrent
-
-
-
-
-
- 
-
-
-
-
-
-**Download • [https://www.google.com/url?q=https%3A%2F%2Furllie.com%2F2txoJ9&sa=D&sntz=1&usg=AOvVaw2Sp9Y5XV38y95cyY6TpTb6](https://www.google.com/url?q=https%3A%2F%2Furllie.com%2F2txoJ9&sa=D&sntz=1&usg=AOvVaw2Sp9Y5XV38y95cyY6TpTb6)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-# Hercules (2014): A Mythical Adventure Starring Dwayne Johnson
-
-
-
-Hercules is a 2014 action-adventure film based on the legendary Greek hero, starring Dwayne Johnson as the titular character. The film follows Hercules and his band of mercenaries as they are hired by the King of Thrace to train his army and defeat a tyrannical warlord. Along the way, Hercules must face his past and his true identity as the son of Zeus.
-
-
-
-The film is directed by Brett Ratner and also features Ian McShane, John Hurt, Rufus Sewell, Joseph Fiennes, and Irina Shayk in supporting roles. The film is loosely based on the graphic novel Hercules: The Thracian Wars by Steve Moore and Admira Wijaya.
-
-
-
-Hercules was released on July 25, 2014 in the United States and received mixed reviews from critics and audiences. The film grossed $244 million worldwide against a budget of $100 million.
-
-
-
-If you are a fan of action-packed movies with epic battles, stunning visuals, and charismatic performances, you might want to check out Hercules (2014) in high definition. You can download the movie torrent in 1080p or 3D from various sources online. However, be aware that downloading torrents may be illegal in your country and may expose you to malware or viruses. Always use a VPN when downloading torrents to protect your privacy and security.
-
-
-
-Here are some links where you can find Hercules (2014) movie torrent in 1080p or 3D:
-
-
-
-- YTS: [https://yts.do/movie/hercules-2014/](https://yts.do/movie/hercules-2014/) [^1^]
-
-- Forillowarez: [https://forillowarez.com/threads/h%C3%A9rcules-full-bluray-1080p-cast-ing-fr-ale-it-sub-varios-aventuras-2014.18758/](https://forillowarez.com/threads/h%C3%A9rcules-full-bluray-1080p-cast-ing-fr-ale-it-sub-varios-aventuras-2014.18758/) [^2^]
-
-- Filmespiratas: [https://filmespiratas.blogspot.com/2014/06/hercules-download-dublado.html](https://filmespiratas.blogspot.com/2014/06/hercules-download-dublado.html) [^3^]
-
-
-
-Enjoy watching Hercules (2014) and let us know what you think of the movie in the comments below!
-
-
-
-Hercules (2014) is not the only movie adaptation of the Greek hero's story. There have been many versions of Hercules on the big screen and on television, ranging from serious to comedic, from faithful to creative. Here are some of the most notable ones:
-
-
-
-1. Hercules (1958): This is the first film in a series of Italian peplum movies starring Steve Reeves as Hercules. The film follows Hercules as he helps Jason and the Argonauts in their quest for the Golden Fleece. The film was a huge success and spawned several sequels and spin-offs.
-
-2. Hercules: The Legendary Journeys (1995-1999): This is a popular television series starring Kevin Sorbo as Hercules and Michael Hurst as his sidekick Iolaus. The series mixes Greek mythology with humor and adventure, and features many guest stars from other fantasy shows. The series also spawned a spin-off, Xena: Warrior Princess.
-
-3. Hercules (1997): This is an animated musical comedy film produced by Disney, featuring the voice of Tate Donovan as Hercules and James Woods as Hades. The film reimagines Hercules as a naive and clumsy teenager who must prove himself as a true hero to reclaim his godhood. The film features songs by Alan Menken and David Zippel.
-
-4. The Legend of Hercules (2014): This is another action-adventure film released in the same year as Hercules (2014), starring Kellan Lutz as Hercules and Gaia Weiss as Hebe. The film focuses on Hercules' origin story as he rebels against his stepfather King Amphitryon and falls in love with Hebe, the princess of Crete.
-
-
-
-Which version of Hercules do you prefer? Do you think there will be more movies about him in the future? Share your thoughts with us!
-
-
-
-
-
-
diff --git a/spaces/contluForse/HuggingGPT/assets/Browse Mac App Store.md b/spaces/contluForse/HuggingGPT/assets/Browse Mac App Store.md
deleted file mode 100644
index f48a9299b7b33073a8105b1cba20d37c03ca0e33..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Browse Mac App Store.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
Now of course, because the App Store is not affiliated with Apple, clicking the button to open up more info on that app will launch its browser-based iTunes page. From there, you can either launch iTunes or move to your iOS device and search for that specific app.
For Mac apps, the primary category you select is also the category in which the app appears when users browse the Mac App Store or filter search results, and it determines placement in the Categories tab on the Mac App Store in macOS 10.14 or later.
-
It could be argued that it is useful for antivirus software to collect a limited browsing history leading up to a malware or malicious-webpage detection and block. But it is very hard to justify exfiltrating the entire browsing history of every installed browser, regardless of whether the user has encountered malware. In addition, there was nothing in the app to inform the user about this data collection, and there was no way to opt out of it. Dr. Cleaner: Unfortunately, other apps by the same developer also collect this data. We observed the same data being collected by Dr. Cleaner, minus the list of installed applications. There is really no good reason for a "cleaning" app to collect this kind of user data, even if users were informed, which was not the case.
-
Brave is available as a fast, free, secure web browser for your mobile devices. Complete with a built-in ad blocker that prevents tracking, and optimized for mobile data and battery life savings. Get the Brave Browser (mobile) for Android or iOS.
-
Note that the search above revealed hundreds of hits. If you'd look at that many results in a flat list, it would be hard to browse. With this new hierarchical view, however, you can directly look for the results in the folders that interest you.
-
If you primarily search media files such as videos, photos, or audio files, NeoFinder might be the right tool for you. It permanently catalogues your files, even those that may be stored on other disks or FTP servers that are not always attached, and keeps even detailed information about your image properties (EXIF data etc.), thumbnails, XMP data, and more.
-
In-app purchase keys are private and can only be downloaded once. After downloading the key, store it in a safe place. Remember to revoke a key immediately if it becomes lost or compromised and ensure that offers using this key are updated.
-
-
In the dialog that appears, choose Limited Access or Full Access. If you select Full Access, all users will have access to the app. Users with the Admin, Finance, and Access to Reports roles will always have access to all apps. Then click Restore.
-
If your iOS app offers universal purchase and has an existing macOS platform, the option to offer the iOS app on the Mac App Store will not be available. Additionally, if your iOS app is available on the Mac App Store by using this option and you later add a macOS app by adding the macOS platform in App Store Connect, releasing it to the store will replace the iOS app on the Mac App Store. All existing users for the iOS app will be updated to the macOS app.
-
If you create a new version of an app you removed from the App Store and it gets approved by App Review, the app status remains as Developer Removed from Sale. To release the new version, you must restore it to the App Store first.
-
Auto-renewable subscriptions can be priced by App Store country or region (also known as storefronts). You can choose from 800 price points in each currency, with the option for the Account Holder to submit a request for additional higher price points.
-
App Store Connect provides comparable prices for all 175 App Store countries and regions, taking into account taxes and foreign exchange rates. If you want to set different prices for specific storefronts, make the changes and click Next.
-
International equivalents for prices not in USD are based on current exchange rates with the specific thresholds which are subject to change based on changes in taxes or foreign exchange rates. View a list of thresholds per storefront.
-
You can change the account region of a test account to any of the 175 App Store regions. This allows you to test on different storefronts using the same Sandbox Apple ID, without having to create new testers.
-
I take your privacy very seriously. Reeder does not collect any data. All sensitive data (passwords and other account data) is stored securely in the Keychain on your device or iCloud (if you have enabled iCloud Keychain).
-
We pride ourselves in the security of our product and the flexibility it provides you. You can set a startup password, lock the My Private Files area, and set the level of protection you need. Passwords are stored securely in the iOS KeyChain, but you can choose to enter them each time you connect.
-
Secure access to your computer (Mac or PC) while you are away from home. Use our simple steps to install ZeroTier VPN for secure, encrypted access from your iPad / iPhone. With FileBrowser's efficient network access you can even browse folders and view files over a cellular connection.
-
OS X automatically creates new "default" files for some items when they are cleaned from the system, including Finder Recent Items, Safari Cookies, etc. These newly created files are simply empty or template files used by the operating system to store data going forward. This is normal behavior for the operating system, and does not indicate any problems in cleaning the files.
-
The above guide was all about disabling private browsing on Safari. You must know that this will not though, prevent the concerned person from using incognito mode or private browsing in any other browser like Chrome, Firefox, etc. that is installed on the Mac.
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Cdroller License Key Free Crack How to Restore Your Disc Data in Minutes.md b/spaces/contluForse/HuggingGPT/assets/Cdroller License Key Free Crack How to Restore Your Disc Data in Minutes.md
deleted file mode 100644
index 87ddcf891b13ea72cf94379c15bd3c346a298a63..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Cdroller License Key Free Crack How to Restore Your Disc Data in Minutes.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
2. Keep your computer protected - cracked CDRoller hides security risks. Piracy and security threats come hand in hand, usually because they are created by the same people. When using a cracked copy of CDRoller you take the risk of being infected with a virus, Trojan horse, malware, spyware and other unpleasant surprises hidden in the keygen or the software itself. Usually, the problems caused by malicious code will cost you much more than the money you would pay for a CDRoller license.
Using a warez version, crack, warez passwords, patches, serial numbers, registration codes, key generator, pirate key, keymaker or keygen for a CDRoller 11.40.60.0 license key is illegal and prevents future development of CDRoller 11.40.60.0. Download links are directly from our mirrors or the publisher's website; CDRoller 11.40.60.0 torrent files or shared files from free file sharing and free upload services, including CDRoller 11.40.60.0 on Rapidshare, MegaUpload, HellShare, HotFile, FileServe, YouSendIt, SendSpace, DepositFiles, Letitbit, MailBigFile, DropSend, MediaMax, LeapFile, zUpload, MyOtherDrive, DivShare or MediaFire, are not allowed!
-
-
\ No newline at end of file
diff --git a/spaces/coraKong/WorldSimulation/plugins/DisasterPlugin.py b/spaces/coraKong/WorldSimulation/plugins/DisasterPlugin.py
deleted file mode 100644
index 5ad5863b855e2a0a4428b23e66fddd1822f630cd..0000000000000000000000000000000000000000
--- a/spaces/coraKong/WorldSimulation/plugins/DisasterPlugin.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import random
-
-class DisasterPlugin:
- def __init__(self, disaster_frequency, disaster_prob):
- self.disaster_frequency = disaster_frequency
- self.disaster_prob = disaster_prob
-
- def trigger_disaster(self, characters, character_die_callback):
- print("发生自然灾难...")
- num_killed = int(len(characters) * self.disaster_prob)
- killed = random.sample(characters, num_killed)
- for c in killed:
- c.history.append(f"{c.real_age}岁,死于自然灾难")  # i.e. "died in a natural disaster at age {real_age}"
- character_die_callback(c)
-
- def execute(self, characters, round_num, character_die_callback):
- if round_num % self.disaster_frequency == 0 and random.random() < self.disaster_prob:
- self.trigger_disaster(characters, character_die_callback)
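For context, a hedged usage sketch of how this plugin might be driven from a simulation loop, assuming `DisasterPlugin` above is importable; the `Character` stand-in and the death callback are illustrative assumptions, not part of the original project.

```python
# Hedged usage sketch; `Character` is a minimal stand-in, not the project's class.
class Character:
    def __init__(self, real_age):
        self.real_age = real_age
        self.history = []

def on_character_death(character):
    # In the real simulation this callback would remove the character from the world.
    print(f"character aged {character.real_age} died")

characters = [Character(age) for age in (12, 30, 45, 70)]
plugin = DisasterPlugin(disaster_frequency=10, disaster_prob=0.3)

for round_num in range(1, 51):   # a disaster can only fire on every 10th round
    plugin.execute(characters, round_num, on_character_death)
```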
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/dist_utils.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/dist_utils.py
deleted file mode 100644
index d3a1ef3fda5ceeb31bf15a73779da1b1903ab0fe..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/dist_utils.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import functools
-import os
-import subprocess
-from collections import OrderedDict
-
-import torch
-import torch.multiprocessing as mp
-from torch import distributed as dist
-from torch._utils import (_flatten_dense_tensors, _take_tensors,
- _unflatten_dense_tensors)
-
-
-def init_dist(launcher, backend='nccl', **kwargs):
- if mp.get_start_method(allow_none=True) is None:
- mp.set_start_method('spawn')
- if launcher == 'pytorch':
- _init_dist_pytorch(backend, **kwargs)
- elif launcher == 'mpi':
- _init_dist_mpi(backend, **kwargs)
- elif launcher == 'slurm':
- _init_dist_slurm(backend, **kwargs)
- else:
- raise ValueError(f'Invalid launcher type: {launcher}')
-
-
-def _init_dist_pytorch(backend, **kwargs):
- # TODO: use local_rank instead of rank % num_gpus
- rank = int(os.environ['RANK'])
- num_gpus = torch.cuda.device_count()
- torch.cuda.set_device(rank % num_gpus)
- dist.init_process_group(backend=backend, **kwargs)
-
-
-def _init_dist_mpi(backend, **kwargs):
- # TODO: use local_rank instead of rank % num_gpus
- rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
- num_gpus = torch.cuda.device_count()
- torch.cuda.set_device(rank % num_gpus)
- dist.init_process_group(backend=backend, **kwargs)
-
-
-def _init_dist_slurm(backend, port=None):
- """Initialize slurm distributed training environment.
-
- If argument ``port`` is not specified, then the master port will be system
- environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not in system
- environment variable, then a default port ``29500`` will be used.
-
- Args:
- backend (str): Backend of torch.distributed.
- port (int, optional): Master port. Defaults to None.
- """
- proc_id = int(os.environ['SLURM_PROCID'])
- ntasks = int(os.environ['SLURM_NTASKS'])
- node_list = os.environ['SLURM_NODELIST']
- num_gpus = torch.cuda.device_count()
- torch.cuda.set_device(proc_id % num_gpus)
- addr = subprocess.getoutput(
- f'scontrol show hostname {node_list} | head -n1')
- # specify master port
- if port is not None:
- os.environ['MASTER_PORT'] = str(port)
- elif 'MASTER_PORT' in os.environ:
- pass # use MASTER_PORT in the environment variable
- else:
- # 29500 is torch.distributed default port
- os.environ['MASTER_PORT'] = '29500'
- # use MASTER_ADDR in the environment variable if it already exists
- if 'MASTER_ADDR' not in os.environ:
- os.environ['MASTER_ADDR'] = addr
- os.environ['WORLD_SIZE'] = str(ntasks)
- os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
- os.environ['RANK'] = str(proc_id)
- dist.init_process_group(backend=backend)
-
-
-def get_dist_info():
- if dist.is_available() and dist.is_initialized():
- rank = dist.get_rank()
- world_size = dist.get_world_size()
- else:
- rank = 0
- world_size = 1
- return rank, world_size
-
-
-def master_only(func):
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- rank, _ = get_dist_info()
- if rank == 0:
- return func(*args, **kwargs)
-
- return wrapper
-
-
-def allreduce_params(params, coalesce=True, bucket_size_mb=-1):
- """Allreduce parameters.
-
- Args:
- params (list[torch.Parameters]): List of parameters or buffers of a
- model.
- coalesce (bool, optional): Whether allreduce parameters as a whole.
- Defaults to True.
- bucket_size_mb (int, optional): Size of bucket, the unit is MB.
- Defaults to -1.
- """
- _, world_size = get_dist_info()
- if world_size == 1:
- return
- params = [param.data for param in params]
- if coalesce:
- _allreduce_coalesced(params, world_size, bucket_size_mb)
- else:
- for tensor in params:
- dist.all_reduce(tensor.div_(world_size))
-
-
-def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
- """Allreduce gradients.
-
- Args:
- params (list[torch.Parameters]): List of parameters of a model
- coalesce (bool, optional): Whether allreduce parameters as a whole.
- Defaults to True.
- bucket_size_mb (int, optional): Size of bucket, the unit is MB.
- Defaults to -1.
- """
- grads = [
- param.grad.data for param in params
- if param.requires_grad and param.grad is not None
- ]
- _, world_size = get_dist_info()
- if world_size == 1:
- return
- if coalesce:
- _allreduce_coalesced(grads, world_size, bucket_size_mb)
- else:
- for tensor in grads:
- dist.all_reduce(tensor.div_(world_size))
-
-
-def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
- if bucket_size_mb > 0:
- bucket_size_bytes = bucket_size_mb * 1024 * 1024
- buckets = _take_tensors(tensors, bucket_size_bytes)
- else:
- buckets = OrderedDict()
- for tensor in tensors:
- tp = tensor.type()
- if tp not in buckets:
- buckets[tp] = []
- buckets[tp].append(tensor)
- buckets = buckets.values()
-
- for bucket in buckets:
- flat_tensors = _flatten_dense_tensors(bucket)
- dist.all_reduce(flat_tensors)
- flat_tensors.div_(world_size)
- for tensor, synced in zip(
- bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
- tensor.copy_(synced)
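A hedged usage sketch of the helpers above, assuming the module is importable and the process was started by a launcher (e.g. `torch.distributed.launch` or `torchrun`) that sets RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT; the model and checkpoint path are placeholders.

```python
# Illustrative wiring of init_dist / get_dist_info / allreduce_grads / master_only.
import torch

def train_entry():
    init_dist('pytorch', backend='nccl')   # picks a GPU via RANK and inits the process group
    rank, world_size = get_dist_info()
    print(f'rank {rank} / world size {world_size}')

    model = torch.nn.Linear(8, 2).cuda()
    # ... a forward/backward pass here would populate param.grad ...
    allreduce_grads(model.parameters(), coalesce=True)   # average gradients across ranks

    @master_only
    def save_checkpoint(path='ckpt.pth'):   # placeholder path; runs on rank 0 only
        torch.save(model.state_dict(), path)

    save_checkpoint()
```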
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/log_buffer.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/log_buffer.py
deleted file mode 100644
index d949e2941c5400088c7cd8a1dc893d8b233ae785..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/mmpkg/mmcv/runner/log_buffer.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from collections import OrderedDict
-
-import numpy as np
-
-
-class LogBuffer:
-
- def __init__(self):
- self.val_history = OrderedDict()
- self.n_history = OrderedDict()
- self.output = OrderedDict()
- self.ready = False
-
- def clear(self):
- self.val_history.clear()
- self.n_history.clear()
- self.clear_output()
-
- def clear_output(self):
- self.output.clear()
- self.ready = False
-
- def update(self, vars, count=1):
- assert isinstance(vars, dict)
- for key, var in vars.items():
- if key not in self.val_history:
- self.val_history[key] = []
- self.n_history[key] = []
- self.val_history[key].append(var)
- self.n_history[key].append(count)
-
- def average(self, n=0):
- """Average latest n values or all values."""
- assert n >= 0
- for key in self.val_history:
- values = np.array(self.val_history[key][-n:])
- nums = np.array(self.n_history[key][-n:])
- avg = np.sum(values * nums) / np.sum(nums)
- self.output[key] = avg
- self.ready = True
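A short usage sketch of `LogBuffer`: per-iteration values are accumulated together with their sample counts and then averaged with those counts as weights. The metric names and numbers below are made up.

```python
# Accumulate batch statistics, then compute count-weighted averages.
buf = LogBuffer()
buf.update({'loss': 0.9, 'acc': 0.50}, count=32)   # stats from a batch of 32 samples
buf.update({'loss': 0.7, 'acc': 0.60}, count=32)
buf.update({'loss': 0.5, 'acc': 0.70}, count=64)

buf.average()           # weighted average over the stored history
print(buf.output)       # {'loss': 0.65, 'acc': 0.625} (up to float rounding)
print(buf.ready)        # True
buf.clear_output()      # resets output/ready but keeps the history
```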
diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/deform_conv.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/deform_conv.py
deleted file mode 100644
index 49547238bcc67775bbeb97184467c3c8eee8df60..0000000000000000000000000000000000000000
--- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/oneformer/detectron2/layers/deform_conv.py
+++ /dev/null
@@ -1,514 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import math
-from functools import lru_cache
-import torch
-from torch import nn
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-from torch.nn.modules.utils import _pair
-from torchvision.ops import deform_conv2d
-
-from annotator.oneformer.detectron2.utils.develop import create_dummy_class, create_dummy_func
-
-from .wrappers import _NewEmptyTensorOp
-
-
-class _DeformConv(Function):
- @staticmethod
- def forward(
- ctx,
- input,
- offset,
- weight,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- deformable_groups=1,
- im2col_step=64,
- ):
- if input is not None and input.dim() != 4:
- raise ValueError(
- "Expected 4D tensor as input, got {}D tensor instead.".format(input.dim())
- )
- ctx.stride = _pair(stride)
- ctx.padding = _pair(padding)
- ctx.dilation = _pair(dilation)
- ctx.groups = groups
- ctx.deformable_groups = deformable_groups
- ctx.im2col_step = im2col_step
-
- ctx.save_for_backward(input, offset, weight)
-
- output = input.new_empty(
- _DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride)
- )
-
- ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones
-
- if not input.is_cuda:
- # TODO: let torchvision support full features of our deformconv.
- if deformable_groups != 1:
- raise NotImplementedError(
- "Deformable Conv with deformable_groups != 1 is not supported on CPUs!"
- )
- return deform_conv2d(
- input, offset, weight, stride=stride, padding=padding, dilation=dilation
- )
- else:
- cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
- assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
-
- _C.deform_conv_forward(
- input,
- weight,
- offset,
- output,
- ctx.bufs_[0],
- ctx.bufs_[1],
- weight.size(3),
- weight.size(2),
- ctx.stride[1],
- ctx.stride[0],
- ctx.padding[1],
- ctx.padding[0],
- ctx.dilation[1],
- ctx.dilation[0],
- ctx.groups,
- ctx.deformable_groups,
- cur_im2col_step,
- )
- return output
-
- @staticmethod
- @once_differentiable
- def backward(ctx, grad_output):
- input, offset, weight = ctx.saved_tensors
-
- grad_input = grad_offset = grad_weight = None
-
- if not grad_output.is_cuda:
- raise NotImplementedError("Deformable Conv is not supported on CPUs!")
- else:
- cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
- assert (input.shape[0] % cur_im2col_step) == 0, "im2col step must divide batchsize"
-
- if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
- grad_input = torch.zeros_like(input)
- grad_offset = torch.zeros_like(offset)
- _C.deform_conv_backward_input(
- input,
- offset,
- grad_output,
- grad_input,
- grad_offset,
- weight,
- ctx.bufs_[0],
- weight.size(3),
- weight.size(2),
- ctx.stride[1],
- ctx.stride[0],
- ctx.padding[1],
- ctx.padding[0],
- ctx.dilation[1],
- ctx.dilation[0],
- ctx.groups,
- ctx.deformable_groups,
- cur_im2col_step,
- )
-
- if ctx.needs_input_grad[2]:
- grad_weight = torch.zeros_like(weight)
- _C.deform_conv_backward_filter(
- input,
- offset,
- grad_output,
- grad_weight,
- ctx.bufs_[0],
- ctx.bufs_[1],
- weight.size(3),
- weight.size(2),
- ctx.stride[1],
- ctx.stride[0],
- ctx.padding[1],
- ctx.padding[0],
- ctx.dilation[1],
- ctx.dilation[0],
- ctx.groups,
- ctx.deformable_groups,
- 1,
- cur_im2col_step,
- )
-
- return grad_input, grad_offset, grad_weight, None, None, None, None, None, None
-
- @staticmethod
- def _output_size(input, weight, padding, dilation, stride):
- channels = weight.size(0)
- output_size = (input.size(0), channels)
- for d in range(input.dim() - 2):
- in_size = input.size(d + 2)
- pad = padding[d]
- kernel = dilation[d] * (weight.size(d + 2) - 1) + 1
- stride_ = stride[d]
- output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1,)
- if not all(map(lambda s: s > 0, output_size)):
- raise ValueError(
- "convolution input is too small (output would be {})".format(
- "x".join(map(str, output_size))
- )
- )
- return output_size
-
- @staticmethod
- @lru_cache(maxsize=128)
- def _cal_im2col_step(input_size, default_size):
- """
- Calculate proper im2col step size, which should be divisible by input_size and not larger
- than prefer_size. Meanwhile the step size should be as large as possible to be more
- efficient. So we choose the largest one among all divisors of input_size which are smaller
- than prefer_size.
- :param input_size: input batch size .
- :param default_size: default preferred im2col step size.
- :return: the largest proper step size.
- """
- if input_size <= default_size:
- return input_size
- best_step = 1
- for step in range(2, min(int(math.sqrt(input_size)) + 1, default_size)):
- if input_size % step == 0:
- if input_size // step <= default_size:
- return input_size // step
- best_step = step
-
- return best_step
-
-
-class _ModulatedDeformConv(Function):
- @staticmethod
- def forward(
- ctx,
- input,
- offset,
- mask,
- weight,
- bias=None,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- deformable_groups=1,
- ):
- ctx.stride = stride
- ctx.padding = padding
- ctx.dilation = dilation
- ctx.groups = groups
- ctx.deformable_groups = deformable_groups
- ctx.with_bias = bias is not None
- if not ctx.with_bias:
- bias = input.new_empty(1) # fake tensor
- if not input.is_cuda:
- raise NotImplementedError("Deformable Conv is not supported on CPUs!")
- if (
- weight.requires_grad
- or mask.requires_grad
- or offset.requires_grad
- or input.requires_grad
- ):
- ctx.save_for_backward(input, offset, mask, weight, bias)
- output = input.new_empty(_ModulatedDeformConv._infer_shape(ctx, input, weight))
- ctx._bufs = [input.new_empty(0), input.new_empty(0)]
- _C.modulated_deform_conv_forward(
- input,
- weight,
- bias,
- ctx._bufs[0],
- offset,
- mask,
- output,
- ctx._bufs[1],
- weight.shape[2],
- weight.shape[3],
- ctx.stride,
- ctx.stride,
- ctx.padding,
- ctx.padding,
- ctx.dilation,
- ctx.dilation,
- ctx.groups,
- ctx.deformable_groups,
- ctx.with_bias,
- )
- return output
-
- @staticmethod
- @once_differentiable
- def backward(ctx, grad_output):
- if not grad_output.is_cuda:
- raise NotImplementedError("Deformable Conv is not supported on CPUs!")
- input, offset, mask, weight, bias = ctx.saved_tensors
- grad_input = torch.zeros_like(input)
- grad_offset = torch.zeros_like(offset)
- grad_mask = torch.zeros_like(mask)
- grad_weight = torch.zeros_like(weight)
- grad_bias = torch.zeros_like(bias)
- _C.modulated_deform_conv_backward(
- input,
- weight,
- bias,
- ctx._bufs[0],
- offset,
- mask,
- ctx._bufs[1],
- grad_input,
- grad_weight,
- grad_bias,
- grad_offset,
- grad_mask,
- grad_output,
- weight.shape[2],
- weight.shape[3],
- ctx.stride,
- ctx.stride,
- ctx.padding,
- ctx.padding,
- ctx.dilation,
- ctx.dilation,
- ctx.groups,
- ctx.deformable_groups,
- ctx.with_bias,
- )
- if not ctx.with_bias:
- grad_bias = None
-
- return (
- grad_input,
- grad_offset,
- grad_mask,
- grad_weight,
- grad_bias,
- None,
- None,
- None,
- None,
- None,
- )
-
- @staticmethod
- def _infer_shape(ctx, input, weight):
- n = input.size(0)
- channels_out = weight.size(0)
- height, width = input.shape[2:4]
- kernel_h, kernel_w = weight.shape[2:4]
- height_out = (
- height + 2 * ctx.padding - (ctx.dilation * (kernel_h - 1) + 1)
- ) // ctx.stride + 1
- width_out = (
- width + 2 * ctx.padding - (ctx.dilation * (kernel_w - 1) + 1)
- ) // ctx.stride + 1
- return n, channels_out, height_out, width_out
-
-
-deform_conv = _DeformConv.apply
-modulated_deform_conv = _ModulatedDeformConv.apply
-
-
-class DeformConv(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- deformable_groups=1,
- bias=False,
- norm=None,
- activation=None,
- ):
- """
- Deformable convolution from :paper:`deformconv`.
-
- Arguments are similar to :class:`Conv2D`. Extra arguments:
-
- Args:
- deformable_groups (int): number of groups used in deformable convolution.
- norm (nn.Module, optional): a normalization layer
- activation (callable(Tensor) -> Tensor): a callable activation function
- """
- super(DeformConv, self).__init__()
-
- assert not bias
- assert in_channels % groups == 0, "in_channels {} cannot be divisible by groups {}".format(
- in_channels, groups
- )
- assert (
- out_channels % groups == 0
- ), "out_channels {} cannot be divisible by groups {}".format(out_channels, groups)
-
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.kernel_size = _pair(kernel_size)
- self.stride = _pair(stride)
- self.padding = _pair(padding)
- self.dilation = _pair(dilation)
- self.groups = groups
- self.deformable_groups = deformable_groups
- self.norm = norm
- self.activation = activation
-
- self.weight = nn.Parameter(
- torch.Tensor(out_channels, in_channels // self.groups, *self.kernel_size)
- )
- self.bias = None
-
- nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
-
- def forward(self, x, offset):
- if x.numel() == 0:
- # When input is empty, we want to return a empty tensor with "correct" shape,
- # So that the following operations will not panic
- # if they check for the shape of the tensor.
- # This computes the height and width of the output tensor
- output_shape = [
- (i + 2 * p - (di * (k - 1) + 1)) // s + 1
- for i, p, di, k, s in zip(
- x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
- )
- ]
- output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
- return _NewEmptyTensorOp.apply(x, output_shape)
-
- x = deform_conv(
- x,
- offset,
- self.weight,
- self.stride,
- self.padding,
- self.dilation,
- self.groups,
- self.deformable_groups,
- )
- if self.norm is not None:
- x = self.norm(x)
- if self.activation is not None:
- x = self.activation(x)
- return x
-
- def extra_repr(self):
- tmpstr = "in_channels=" + str(self.in_channels)
- tmpstr += ", out_channels=" + str(self.out_channels)
- tmpstr += ", kernel_size=" + str(self.kernel_size)
- tmpstr += ", stride=" + str(self.stride)
- tmpstr += ", padding=" + str(self.padding)
- tmpstr += ", dilation=" + str(self.dilation)
- tmpstr += ", groups=" + str(self.groups)
- tmpstr += ", deformable_groups=" + str(self.deformable_groups)
- tmpstr += ", bias=False"
- return tmpstr
-
-
-class ModulatedDeformConv(nn.Module):
- def __init__(
- self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- deformable_groups=1,
- bias=True,
- norm=None,
- activation=None,
- ):
- """
- Modulated deformable convolution from :paper:`deformconv2`.
-
- Arguments are similar to :class:`Conv2D`. Extra arguments:
-
- Args:
- deformable_groups (int): number of groups used in deformable convolution.
- norm (nn.Module, optional): a normalization layer
- activation (callable(Tensor) -> Tensor): a callable activation function
- """
- super(ModulatedDeformConv, self).__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.kernel_size = _pair(kernel_size)
- self.stride = stride
- self.padding = padding
- self.dilation = dilation
- self.groups = groups
- self.deformable_groups = deformable_groups
- self.with_bias = bias
- self.norm = norm
- self.activation = activation
-
- self.weight = nn.Parameter(
- torch.Tensor(out_channels, in_channels // groups, *self.kernel_size)
- )
- if bias:
- self.bias = nn.Parameter(torch.Tensor(out_channels))
- else:
- self.bias = None
-
- nn.init.kaiming_uniform_(self.weight, nonlinearity="relu")
- if self.bias is not None:
- nn.init.constant_(self.bias, 0)
-
- def forward(self, x, offset, mask):
- if x.numel() == 0:
- output_shape = [
- (i + 2 * p - (di * (k - 1) + 1)) // s + 1
- for i, p, di, k, s in zip(
- x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride
- )
- ]
- output_shape = [x.shape[0], self.weight.shape[0]] + output_shape
- return _NewEmptyTensorOp.apply(x, output_shape)
-
- x = modulated_deform_conv(
- x,
- offset,
- mask,
- self.weight,
- self.bias,
- self.stride,
- self.padding,
- self.dilation,
- self.groups,
- self.deformable_groups,
- )
- if self.norm is not None:
- x = self.norm(x)
- if self.activation is not None:
- x = self.activation(x)
- return x
-
- def extra_repr(self):
- tmpstr = "in_channels=" + str(self.in_channels)
- tmpstr += ", out_channels=" + str(self.out_channels)
- tmpstr += ", kernel_size=" + str(self.kernel_size)
- tmpstr += ", stride=" + str(self.stride)
- tmpstr += ", padding=" + str(self.padding)
- tmpstr += ", dilation=" + str(self.dilation)
- tmpstr += ", groups=" + str(self.groups)
- tmpstr += ", deformable_groups=" + str(self.deformable_groups)
- tmpstr += ", bias=" + str(self.with_bias)
- return tmpstr
-
-
-try:
- from annotator.oneformer.detectron2 import _C
-except ImportError:
- # TODO: register ops natively so there is no need to import _C.
- _msg = "detectron2 is not compiled successfully, please build following the instructions!"
- _args = ("detectron2._C", _msg)
- DeformConv = create_dummy_class("DeformConv", *_args)
- ModulatedDeformConv = create_dummy_class("ModulatedDeformConv", *_args)
- deform_conv = create_dummy_func("deform_conv", *_args)
- modulated_deform_conv = create_dummy_func("modulated_deform_conv", *_args)
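A hedged usage sketch of the `DeformConv` wrapper above: the offsets are typically predicted by a small ordinary convolution with `2 * deformable_groups * kH * kW` output channels (one 2-D offset per sampled kernel location). Shapes are illustrative; if the detectron2 C extension is not built, the classes above are replaced by dummies and this will not run (on CPU the torchvision fallback requires `deformable_groups=1`).

```python
# Illustrative pairing of a regular conv (offset predictor) with DeformConv.
import torch
from torch import nn

in_ch, out_ch, k = 16, 32, 3
offset_conv = nn.Conv2d(in_ch, 2 * k * k, kernel_size=k, padding=1)  # 18 offset channels
deform = DeformConv(in_ch, out_ch, kernel_size=k, padding=1)

x = torch.randn(2, in_ch, 64, 64)
offset = offset_conv(x)      # shape (2, 18, 64, 64)
y = deform(x, offset)        # shape (2, 32, 64, 64)
```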
diff --git a/spaces/crylake/img2poem/query2labels/q2l_infer.py b/spaces/crylake/img2poem/query2labels/q2l_infer.py
deleted file mode 100644
index 214ddfa6d0563dc8ae9cb8114a81af5e02d1468c..0000000000000000000000000000000000000000
--- a/spaces/crylake/img2poem/query2labels/q2l_infer.py
+++ /dev/null
@@ -1,378 +0,0 @@
-import argparse
-import os, sys
-import random
-import datetime
-import time
-from typing import List
-import json
-import numpy as np
-
-import torch
-import torch.nn as nn
-import torch.nn.parallel
-import torch.backends.cudnn as cudnn
-import torch.distributed as dist
-import torch.optim
-import torch.utils.data
-import torch.utils.data.distributed
-
-import _init_paths
-from dataset.get_dataset import get_datasets
-
-
-from utils.logger import setup_logger
-import models
-import models.aslloss
-from models.query2label import build_q2l
-from utils.metric import voc_mAP
-from utils.misc import clean_state_dict
-from utils.slconfig import get_raw_dict
-
-
-def parser_args():
- available_models = ['Q2L-R101-448', 'Q2L-R101-576', 'Q2L-TResL-448', 'Q2L-TResL_22k-448', 'Q2L-SwinL-384', 'Q2L-CvT_w24-384']
-
- parser = argparse.ArgumentParser(description='Query2Label for multilabel classification')
- parser.add_argument('--dataname', help='dataname', default='coco14', choices=['coco14'])
- parser.add_argument('--dataset_dir', help='dir of dataset', default='/comp_robot/liushilong/data/COCO14/')
-
- parser.add_argument('--img_size', default=448, type=int,
- help='image size. default(448)')
- parser.add_argument('-a', '--arch', metavar='ARCH', default='Q2L-R101-448',
- choices=available_models,
- help='model architecture: ' +
- ' | '.join(available_models) +
- ' (default: Q2L-R101-448)')
- parser.add_argument('--config', type=str, help='config file')
-
- parser.add_argument('--output', metavar='DIR',
- help='path to output folder')
- parser.add_argument('--loss', metavar='LOSS', default='asl',
- choices=['asl'],
- help='loss function')
- parser.add_argument('--num_class', default=80, type=int,
- help="Number of classes.")
- parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
- help='number of data loading workers (default: 8)')
- parser.add_argument('-b', '--batch-size', default=16, type=int,
- metavar='N',
- help='mini-batch size (default: 16), this is the total '
- 'batch size of all GPUs')
- parser.add_argument('-p', '--print-freq', default=10, type=int,
- metavar='N', help='print frequency (default: 10)')
- parser.add_argument('--resume', type=str, metavar='PATH',
- help='path to latest checkpoint (default: none)')
-
- parser.add_argument('--pretrained', dest='pretrained', action='store_true',
- help='use pre-trained model. default is False. ')
-
- parser.add_argument('--eps', default=1e-5, type=float,
- help='eps for focal loss (default: 1e-5)')
-
- # distribution training
- parser.add_argument('--world-size', default=-1, type=int,
- help='number of nodes for distributed training')
- parser.add_argument('--rank', default=-1, type=int,
- help='node rank for distributed training')
- parser.add_argument('--dist-url', default='tcp://127.0.0.1:3451', type=str,
- help='url used to set up distributed training')
- parser.add_argument('--seed', default=None, type=int,
- help='seed for initializing training. ')
- parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')
- parser.add_argument('--amp', action='store_true',
- help='use mixture precision.')
- # data aug
- parser.add_argument('--orid_norm', action='store_true', default=False,
- help='using ordinary norm of [0,0,0] and [1,1,1] for mean and std.')
-
-
- # * Transformer
- parser.add_argument('--enc_layers', default=1, type=int,
- help="Number of encoding layers in the transformer")
- parser.add_argument('--dec_layers', default=2, type=int,
- help="Number of decoding layers in the transformer")
- parser.add_argument('--dim_feedforward', default=256, type=int,
- help="Intermediate size of the feedforward layers in the transformer blocks")
- parser.add_argument('--hidden_dim', default=128, type=int,
- help="Size of the embeddings (dimension of the transformer)")
- parser.add_argument('--dropout', default=0.1, type=float,
- help="Dropout applied in the transformer")
- parser.add_argument('--nheads', default=4, type=int,
- help="Number of attention heads inside the transformer's attentions")
- parser.add_argument('--pre_norm', action='store_true')
- parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine',),
- help="Type of positional embedding to use on top of the image features")
- parser.add_argument('--backbone', default='resnet101', type=str,
- help="Name of the convolutional backbone to use")
- parser.add_argument('--keep_other_self_attn_dec', action='store_true',
- help='keep the other self attention modules in transformer decoders, which are removed by default.')
- parser.add_argument('--keep_first_self_attn_dec', action='store_true',
- help='keep the first self attention module in transformer decoders, which is removed by default.')
- parser.add_argument('--keep_input_proj', action='store_true',
- help="keep the input projection layer. Needed when the channel of image features is different from hidden_dim of Transformer layers.")
- args = parser.parse_args()
-
- # update parameters with pre-defined config file
- if args.config:
- with open(args.config, 'r') as f:
- cfg_dict = json.load(f)
- for k,v in cfg_dict.items():
- setattr(args, k, v)
-
- return args
-
-def get_args():
- args = parser_args()
- return args
-
-
-best_mAP = 0
-
-def main():
- args = get_args()
-
- if 'WORLD_SIZE' in os.environ:
- assert args.world_size > 0, 'please set --world-size and --rank in the command line'
- # launch by torch.distributed.launch
- # Single node
- # python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ...
- local_world_size = int(os.environ['WORLD_SIZE'])
- args.world_size = args.world_size * local_world_size
- args.rank = args.rank * local_world_size + args.local_rank
- print('world size: {}, world rank: {}, local rank: {}'.format(args.world_size, args.rank, args.local_rank))
- print('os.environ:', os.environ)
- else:
- # single process, useful for debugging
- # python main.py ...
- args.world_size = 1
- args.rank = 0
- args.local_rank = 0
-
- if args.seed is not None:
- random.seed(args.seed)
- torch.manual_seed(args.seed)
- np.random.seed(args.seed)
-
- torch.cuda.set_device(args.local_rank)
- print('| distributed init (local_rank {}): {}'.format(
- args.local_rank, args.dist_url), flush=True)
- torch.distributed.init_process_group(backend='nccl', init_method=args.dist_url,
- world_size=args.world_size, rank=args.rank)
- cudnn.benchmark = True
-
- # set output dir and logger
- if not args.output:
- args.output = (f"logs/{args.arch}-{datetime.datetime.now()}").replace(' ', '-')
- os.makedirs(args.output, exist_ok=True)
- logger = setup_logger(output=args.output, distributed_rank=dist.get_rank(), color=False, name="Q2L")
- logger.info("Command: "+' '.join(sys.argv))
-
-
- # save config to outputdir
- if dist.get_rank() == 0:
- path = os.path.join(args.output, "config.json")
- with open(path, 'w') as f:
- json.dump(get_raw_dict(args), f, indent=2)
- logger.info("Full config saved to {}".format(path))
-
- logger.info('world size: {}'.format(dist.get_world_size()))
- logger.info('dist.get_rank(): {}'.format(dist.get_rank()))
- logger.info('local_rank: {}'.format(args.local_rank))
-
- return main_worker(args, logger)
-
-def main_worker(args, logger):
- global best_mAP
-
- # build model
- model = build_q2l(args)
- model = model.cuda()
- model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], broadcast_buffers=False)
- criterion = models.aslloss.AsymmetricLossOptimized(
- gamma_neg=args.gamma_neg, gamma_pos=args.gamma_pos,
- disable_torch_grad_focal_loss=True,
- eps=args.eps,
- )
-
-
- # optionally resume from a checkpoint
- if args.resume:
- if os.path.isfile(args.resume):
- logger.info("=> loading checkpoint '{}'".format(args.resume))
- checkpoint = torch.load(args.resume, map_location=torch.device(dist.get_rank()))
- state_dict = clean_state_dict(checkpoint['state_dict'])
- model.module.load_state_dict(state_dict, strict=True)
- del checkpoint
- del state_dict
- torch.cuda.empty_cache()
- else:
- logger.info("=> no checkpoint found at '{}'".format(args.resume))
-
- # Data loading code
- _, val_dataset = get_datasets(args)
- assert args.batch_size // dist.get_world_size() == args.batch_size / dist.get_world_size(), 'Batch size is not divisible by num of gpus.'
- val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
- val_loader = torch.utils.data.DataLoader(
- val_dataset, batch_size=args.batch_size // dist.get_world_size(), shuffle=False,
- num_workers=args.workers, pin_memory=True, sampler=val_sampler)
-
-
- # for eval only
- _, mAP = validate(val_loader, model, criterion, args, logger)
- logger.info(' * mAP {mAP:.1f}'
- .format(mAP=mAP))
- return
-
-
-
-@torch.no_grad()
-def validate(val_loader, model, criterion, args, logger):
- batch_time = AverageMeter('Time', ':5.3f')
- losses = AverageMeter('Loss', ':5.3f')
- mem = AverageMeter('Mem', ':.0f', val_only=True)
-
- progress = ProgressMeter(
- len(val_loader),
- [batch_time, losses, mem],
- prefix='Test: ')
-
- # switch to evaluate mode
- model.eval()
- saved_data = []
- with torch.no_grad():
- end = time.time()
- for i, (images, target) in enumerate(val_loader):
- images = images.cuda(non_blocking=True)
- target = target.cuda(non_blocking=True)
-
- # compute output
- with torch.cuda.amp.autocast(enabled=args.amp):
- output = model(images)
- loss = criterion(output, target)
- output_sm = nn.functional.sigmoid(output)
-
- # record loss
- losses.update(loss.item(), images.size(0))
- mem.update(torch.cuda.max_memory_allocated() / 1024.0 / 1024.0)
-
- # save some data
- _item = torch.cat((output_sm.detach().cpu(), target.detach().cpu()), 1)
- saved_data.append(_item)
-
- # measure elapsed time
- batch_time.update(time.time() - end)
- end = time.time()
-
- if i % args.print_freq == 0 and dist.get_rank() == 0:
- progress.display(i, logger)
-
- logger.info('=> synchronize...')
- if dist.get_world_size() > 1:
- dist.barrier()
- loss_avg, = map(
- _meter_reduce if dist.get_world_size() > 1 else lambda x: x.avg,
- [losses]
- )
-
- # calculate mAP
- saved_data = torch.cat(saved_data, 0).numpy()
- saved_name = 'saved_data_tmp.{}.txt'.format(dist.get_rank())
- np.savetxt(os.path.join(args.output, saved_name), saved_data)
- if dist.get_world_size() > 1:
- dist.barrier()
-
- if dist.get_rank() == 0:
- print("Calculating mAP:")
- filenamelist = ['saved_data_tmp.{}.txt'.format(ii) for ii in range(dist.get_world_size())]
- metric_func = voc_mAP
- mAP, aps = metric_func([os.path.join(args.output, _filename) for _filename in filenamelist], args.num_class, return_each=True)
-
- logger.info(" mAP: {}".format(mAP))
- logger.info(" aps: {}".format(np.array2string(aps, precision=5)))
- else:
- mAP = 0
-
- if dist.get_world_size() > 1:
- dist.barrier()
-
- return loss_avg, mAP
-
-
-##################################################################################
-
-def _meter_reduce(meter):
- meter_sum = torch.FloatTensor([meter.sum]).cuda()
- meter_count = torch.FloatTensor([meter.count]).cuda()
- torch.distributed.reduce(meter_sum, 0)
- torch.distributed.reduce(meter_count, 0)
- meter_avg = meter_sum / meter_count
-
- return meter_avg.item()
-
-
-def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
- # torch.save(state, filename)
- if is_best:
- torch.save(state, os.path.split(filename)[0] + '/model_best.pth.tar')
- # shutil.copyfile(filename, os.path.split(filename)[0] + '/model_best.pth.tar')
-
-
-class AverageMeter(object):
- """Computes and stores the average and current value"""
- def __init__(self, name, fmt=':f', val_only=False):
- self.name = name
- self.fmt = fmt
- self.val_only = val_only
- self.reset()
-
- def reset(self):
- self.val = 0
- self.avg = 0
- self.sum = 0
- self.count = 0
-
- def update(self, val, n=1):
- self.val = val
- self.sum += val * n
- self.count += n
- self.avg = self.sum / self.count
-
- def __str__(self):
- if self.val_only:
- fmtstr = '{name} {val' + self.fmt + '}'
- else:
- fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
- return fmtstr.format(**self.__dict__)
-
-
-class ProgressMeter(object):
- def __init__(self, num_batches, meters, prefix=""):
- self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
- self.meters = meters
- self.prefix = prefix
-
- def display(self, batch, logger):
- entries = [self.prefix + self.batch_fmtstr.format(batch)]
- entries += [str(meter) for meter in self.meters]
- logger.info(' '.join(entries))
-
- def _get_batch_fmtstr(self, num_batches):
- num_digits = len(str(num_batches // 1))
- fmt = '{:' + str(num_digits) + 'd}'
- return '[' + fmt + '/' + fmt.format(num_batches) + ']'
-
-
-def kill_process(filename:str, holdpid:int) -> List[str]:
- # used for training only.
- import subprocess, signal
- res = subprocess.check_output("ps aux | grep {} | grep -v grep | awk '{{print $2}}'".format(filename), shell=True, cwd="./")
- res = res.decode('utf-8')
- idlist = [i.strip() for i in res.split('\n') if i != '']
- print("kill: {}".format(idlist))
- for idname in idlist:
- if idname != str(holdpid):
- os.kill(int(idname), signal.SIGKILL)
- return idlist
-
-if __name__ == '__main__':
- main()
diff --git a/spaces/cvlab/zero123-live/ldm/data/nerf_like.py b/spaces/cvlab/zero123-live/ldm/data/nerf_like.py
deleted file mode 100644
index 84ef18288db005c72d3b5832144a7bd5cfffe9b2..0000000000000000000000000000000000000000
--- a/spaces/cvlab/zero123-live/ldm/data/nerf_like.py
+++ /dev/null
@@ -1,165 +0,0 @@
-from torch.utils.data import Dataset
-import os
-import json
-import numpy as np
-import torch
-import imageio
-import math
-import cv2
-from torchvision import transforms
-
-def cartesian_to_spherical(xyz):
- ptsnew = np.hstack((xyz, np.zeros(xyz.shape)))
- xy = xyz[:,0]**2 + xyz[:,1]**2
- z = np.sqrt(xy + xyz[:,2]**2)
- theta = np.arctan2(np.sqrt(xy), xyz[:,2]) # for elevation angle defined from Z-axis down
- #ptsnew[:,4] = np.arctan2(xyz[:,2], np.sqrt(xy)) # for elevation angle defined from XY-plane up
- azimuth = np.arctan2(xyz[:,1], xyz[:,0])
- return np.array([theta, azimuth, z])
-
-
-def get_T(T_target, T_cond):
- theta_cond, azimuth_cond, z_cond = cartesian_to_spherical(T_cond[None, :])
- theta_target, azimuth_target, z_target = cartesian_to_spherical(T_target[None, :])
-
- d_theta = theta_target - theta_cond
- d_azimuth = (azimuth_target - azimuth_cond) % (2 * math.pi)
- d_z = z_target - z_cond
-
- d_T = torch.tensor([d_theta.item(), math.sin(d_azimuth.item()), math.cos(d_azimuth.item()), d_z.item()])
- return d_T
-
-def get_spherical(T_target, T_cond):
- theta_cond, azimuth_cond, z_cond = cartesian_to_spherical(T_cond[None, :])
- theta_target, azimuth_target, z_target = cartesian_to_spherical(T_target[None, :])
-
- d_theta = theta_target - theta_cond
- d_azimuth = (azimuth_target - azimuth_cond) % (2 * math.pi)
- d_z = z_target - z_cond
-
- d_T = torch.tensor([math.degrees(d_theta.item()), math.degrees(d_azimuth.item()), d_z.item()])
- return d_T
-
-class RTMV(Dataset):
- def __init__(self, root_dir='datasets/RTMV/google_scanned',\
- first_K=64, resolution=256, load_target=False):
- self.root_dir = root_dir
- self.scene_list = sorted(next(os.walk(root_dir))[1])
- self.resolution = resolution
- self.first_K = first_K
- self.load_target = load_target
-
- def __len__(self):
- return len(self.scene_list)
-
- def __getitem__(self, idx):
- scene_dir = os.path.join(self.root_dir, self.scene_list[idx])
- with open(os.path.join(scene_dir, 'transforms.json'), "r") as f:
- meta = json.load(f)
- imgs = []
- poses = []
- for i_img in range(self.first_K):
- meta_img = meta['frames'][i_img]
-
- if i_img == 0 or self.load_target:
- img_path = os.path.join(scene_dir, meta_img['file_path'])
- img = imageio.imread(img_path)
- img = cv2.resize(img, (self.resolution, self.resolution), interpolation = cv2.INTER_LINEAR)
- imgs.append(img)
-
- c2w = meta_img['transform_matrix']
- poses.append(c2w)
-
- imgs = (np.array(imgs) / 255.).astype(np.float32) # (RGBA) imgs
- imgs = torch.tensor(self.blend_rgba(imgs)).permute(0, 3, 1, 2)
- imgs = imgs * 2 - 1. # convert to stable diffusion range
- poses = torch.tensor(np.array(poses).astype(np.float32))
- return imgs, poses
-
- def blend_rgba(self, img):
- img = img[..., :3] * img[..., -1:] + (1. - img[..., -1:]) # blend A to RGB
- return img
-
-
-class GSO(Dataset):
- def __init__(self, root_dir='datasets/GoogleScannedObjects',\
- split='val', first_K=5, resolution=256, load_target=False, name='render_mvs'):
- self.root_dir = root_dir
- with open(os.path.join(root_dir, '%s.json' % split), "r") as f:
- self.scene_list = json.load(f)
- self.resolution = resolution
- self.first_K = first_K
- self.load_target = load_target
- self.name = name
-
- def __len__(self):
- return len(self.scene_list)
-
- def __getitem__(self, idx):
- scene_dir = os.path.join(self.root_dir, self.scene_list[idx])
- with open(os.path.join(scene_dir, 'transforms_%s.json' % self.name), "r") as f:
- meta = json.load(f)
- imgs = []
- poses = []
- for i_img in range(self.first_K):
- meta_img = meta['frames'][i_img]
-
- if i_img == 0 or self.load_target:
- img_path = os.path.join(scene_dir, meta_img['file_path'])
- img = imageio.imread(img_path)
- img = cv2.resize(img, (self.resolution, self.resolution), interpolation = cv2.INTER_LINEAR)
- imgs.append(img)
-
- c2w = meta_img['transform_matrix']
- poses.append(c2w)
-
- imgs = (np.array(imgs) / 255.).astype(np.float32) # (RGBA) imgs
- mask = imgs[:, :, :, -1]
- imgs = torch.tensor(self.blend_rgba(imgs)).permute(0, 3, 1, 2)
- imgs = imgs * 2 - 1. # convert to stable diffusion range
- poses = torch.tensor(np.array(poses).astype(np.float32))
- return imgs, poses
-
- def blend_rgba(self, img):
- img = img[..., :3] * img[..., -1:] + (1. - img[..., -1:]) # blend A to RGB
- return img
-
-class WILD(Dataset):
- def __init__(self, root_dir='data/nerf_wild',\
- first_K=33, resolution=256, load_target=False):
- self.root_dir = root_dir
- self.scene_list = sorted(next(os.walk(root_dir))[1])
- self.resolution = resolution
- self.first_K = first_K
- self.load_target = load_target
-
- def __len__(self):
- return len(self.scene_list)
-
- def __getitem__(self, idx):
- scene_dir = os.path.join(self.root_dir, self.scene_list[idx])
- with open(os.path.join(scene_dir, 'transforms_train.json'), "r") as f:
- meta = json.load(f)
- imgs = []
- poses = []
- for i_img in range(self.first_K):
- meta_img = meta['frames'][i_img]
-
- if i_img == 0 or self.load_target:
- img_path = os.path.join(scene_dir, meta_img['file_path'])
- img = imageio.imread(img_path + '.png')
- img = cv2.resize(img, (self.resolution, self.resolution), interpolation = cv2.INTER_LINEAR)
- imgs.append(img)
-
- c2w = meta_img['transform_matrix']
- poses.append(c2w)
-
- imgs = (np.array(imgs) / 255.).astype(np.float32) # (RGBA) imgs
- imgs = torch.tensor(self.blend_rgba(imgs)).permute(0, 3, 1, 2)
- imgs = imgs * 2 - 1. # convert to stable diffusion range
- poses = torch.tensor(np.array(poses).astype(np.float32))
- return imgs, poses
-
- def blend_rgba(self, img):
- img = img[..., :3] * img[..., -1:] + (1. - img[..., -1:]) # blend A to RGB
- return img
\ No newline at end of file
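For reference, a small sketch of the relative-pose vector computed by `get_T` above, assuming the module is importable; the camera positions are made up for illustration.

```python
# Illustrative call to get_T with two made-up camera centers on the same circle.
import numpy as np

T_cond = np.array([1.5, 0.0, 0.5])    # conditioning camera position (x, y, z)
T_target = np.array([0.0, 1.5, 0.5])  # target camera position, 90 degrees around the z-axis

d_T = get_T(T_target, T_cond)         # [d_theta, sin(d_azimuth), cos(d_azimuth), d_z]
print(d_T)   # ~tensor([0., 1., 0., 0.]): same elevation and radius, 90-degree azimuth change
```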
diff --git a/spaces/cymic/Waifu_Diffusion_Webui/javascript/imageMaskFix.js b/spaces/cymic/Waifu_Diffusion_Webui/javascript/imageMaskFix.js
deleted file mode 100644
index 3d77bfe9b45c13ef602098b4bd81586767430360..0000000000000000000000000000000000000000
--- a/spaces/cymic/Waifu_Diffusion_Webui/javascript/imageMaskFix.js
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668
- * @see https://github.com/gradio-app/gradio/issues/1721
- */
-window.addEventListener( 'resize', () => imageMaskResize());
-function imageMaskResize() {
- const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas');
- if ( ! canvases.length ) {
- canvases_fixed = false;
- window.removeEventListener( 'resize', imageMaskResize );
- return;
- }
-
- const wrapper = canvases[0].closest('.touch-none');
- const previewImage = wrapper.previousElementSibling;
-
- if ( ! previewImage.complete ) {
- previewImage.addEventListener( 'load', () => imageMaskResize());
- return;
- }
-
- const w = previewImage.width;
- const h = previewImage.height;
- const nw = previewImage.naturalWidth;
- const nh = previewImage.naturalHeight;
- const portrait = nh > nw;
-
- const wW = Math.min(w, portrait ? h/nh*nw : w/nw*nw);
- const wH = Math.min(h, portrait ? h/nh*nh : w/nw*nh);
-
- wrapper.style.width = `${wW}px`;
- wrapper.style.height = `${wH}px`;
- wrapper.style.left = `${(w-wW)/2}px`;
- wrapper.style.top = `${(h-wH)/2}px`;
-
- canvases.forEach( c => {
- c.style.width = c.style.height = '';
- c.style.maxWidth = '100%';
- c.style.maxHeight = '100%';
- c.style.objectFit = 'contain';
- });
- }
-
- onUiUpdate(() => imageMaskResize());
\ No newline at end of file
diff --git a/spaces/dakaiye/dky_xuexi/docs/self_analysis.md b/spaces/dakaiye/dky_xuexi/docs/self_analysis.md
deleted file mode 100644
index ebc2337194974bf210794df7d858889010fecf08..0000000000000000000000000000000000000000
--- a/spaces/dakaiye/dky_xuexi/docs/self_analysis.md
+++ /dev/null
@@ -1,378 +0,0 @@
-# chatgpt-academic Project Self-Analysis Report
-(Author's note: the following analysis was generated in one click by this project itself calling ChatGPT; if anything is inaccurate, blame GPT 😄)
-
-
-| File | Description |
-| ------ | ------ |
-| check_proxy.py | Checks proxy validity and geolocation |
-| colorful.py | Prints colored text to the console |
-| config.py | Configuration and parameter settings |
-| config_private.py | Private configuration and parameter settings |
-| core_functional.py | Core functions and parameter settings |
-| crazy_functional.py | Collection of advanced function plugins |
-| main.py | A chatbot program offering academic translation, text processing, and other query services |
-| multi_language.py | Detects and translates different languages |
-| theme.py | Customizes the gradio application theme |
-| toolbox.py | Utility library that supports the various features |
-| crazy_functions\crazy_functions_test.py | Tests the various functions in crazy_functions |
-| crazy_functions\crazy_utils.py | Utility functions for string handling, exception detection, Markdown conversion, etc. |
-| crazy_functions\Latex全文润色.py | Polishes and proofreads an entire LaTeX project |
-| crazy_functions\Latex全文翻译.py | Translates an entire LaTeX project |
-| crazy_functions\\_\_init\_\_.py | Module init file marking `crazy_functions` as a package |
-| crazy_functions\下载arxiv论文翻译摘要.py | Downloads the PDF of an `arxiv` paper, extracts the abstract, and translates it |
-| crazy_functions\代码重写为全英文_多线程.py | Converts the Chinese content of Python source files into English |
-| crazy_functions\图片生成.py | Generates images from a prompt text using a GPT model |
-| crazy_functions\对话历史存档.py | Writes each conversation record to a Markdown-format file |
-| crazy_functions\总结word文档.py | Generates summaries of input Word documents |
-| crazy_functions\总结音视频.py | Generates summaries of input audio/video files |
-| crazy_functions\批量Markdown翻译.py | Translates the Markdown files under a given directory between Chinese and English |
-| crazy_functions\批量总结PDF文档.py | Splits PDF files and generates summaries |
-| crazy_functions\批量总结PDF文档pdfminer.py | Extracts the text of PDF files and generates summaries |
-| crazy_functions\批量翻译PDF文档_多线程.py | Translates the PDF files under a given directory between Chinese and English |
-| crazy_functions\理解PDF文档内容.py | Summarizes PDF files and answers questions about them |
-| crazy_functions\生成函数注释.py | Automatically generates comments for Python functions |
-| crazy_functions\联网的ChatGPT.py | Answers chat queries using a web crawler together with the ChatGPT model |
-| crazy_functions\解析JupyterNotebook.py | Parses the code in Jupyter Notebooks |
-| crazy_functions\解析项目源代码.py | Parses source code in a specified programming language |
-| crazy_functions\询问多个大语言模型.py | Processes input and replies using multiple large language models |
-| crazy_functions\读文章写摘要.py | Parses papers and generates full-text abstracts |
-| crazy_functions\谷歌检索小助手.py | Provides metadata for the articles on a Google Scholar search page. |
-| crazy_functions\高级功能函数模板.py | Replies to user input with related images via the Unsplash API. |
-| request_llm\bridge_all.py | Runs conversations over different LLM models. |
-| request_llm\bridge_chatglm.py | Generates replies with the ChatGLM model, supporting single- and multi-threaded use. |
-| request_llm\bridge_chatgpt.py | Handles conversations based on the GPT model. |
-| request_llm\bridge_jittorllms_llama.py | Handles conversations with JittorLLMs models, supporting single- and multi-threaded use. |
-| request_llm\bridge_jittorllms_pangualpha.py | Handles conversations with JittorLLMs models using multiprocessing and multithreading. |
-| request_llm\bridge_jittorllms_rwkv.py | Provides chat via JittorLLMs models, with options such as history handling and parameter tuning. |
-| request_llm\bridge_moss.py | Loads the MOSS model for conversation. |
-| request_llm\bridge_newbing.py | Chats through the NewBing chatbot, supporting single- and multi-threaded use. |
-| request_llm\bridge_newbingfree.py | Implements chatbot text generation on top of the Bing chatbot API. |
-| request_llm\bridge_stackclaude.py | Implements Claude-user interaction via the Slack API. |
-| request_llm\bridge_tgui.py | Connects the chatbot to the UI over WebSocket. |
-| request_llm\edge_gpt.py | Calls the Bing chatbot API to provide chatbot services. |
-| request_llm\edge_gpt_free.py | Implements a chatbot API using the aiohttp and httpx libraries. |
-| request_llm\test_llms.py | Unit tests for the LLM models. |
-
-## Next, please analyze the project file by file. [0/48] Please give an overview of the following program file: check_proxy.py
-
-This file mainly contains five functions:
-
-1. `check_proxy`: checks the validity and geolocation of the proxy, printing the proxy configuration and its location.
-
-2. `backup_and_download`: backs up the current version and downloads the new version.
-
-3. `patch_and_restart`: applies the update over the current version and restarts the program.
-
-4. `get_current_version`: gets the current program version number.
-
-5. `auto_update`: automatically checks for a new version and prompts the user to update. If the user chooses to update, it backs up and downloads the new version, applies it over the current one, and restarts the program; if the update fails, it prints an error message and does not prompt the user any further.
-
-There is also a bare statement, `os.environ['no_proxy'] = '*'`, which sets an environment variable to keep the proxy settings from unexpectedly polluting other network traffic.
-
-In addition, the file imports the following three modules/functions:
-
-- `requests`
-- `shutil`
-- `os`
-
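-For illustration, a check of this kind could be sketched as follows; the geo-IP endpoint and the exact return format are assumptions, not necessarily what check_proxy.py really does:
-
-```python
-import requests
-
-def check_proxy(proxies):
-    # Ask a public geo-IP service who we appear to be when going through the proxy.
-    try:
-        data = requests.get("https://ipapi.co/json/", proxies=proxies, timeout=4).json()
-        return f"Proxy {proxies.get('https', 'none')} works, apparent location: {data.get('country_name', 'unknown')}"
-    except Exception as e:
-        return f"Proxy {proxies} failed: {e}"
-```
-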
-## [1/48] Please give an overview of the following program file: colorful.py
-
-This file is a Python script for printing colored text to the console. It defines a set of functions that print text in different colors: red, green, yellow, blue, purple, and indigo are defined as print红, print绿, print黄, print蓝, print紫, and print靛, and the bright variants as print亮红, print亮绿, print亮黄, print亮蓝, print亮紫, and print亮靛. They use ANSI escape codes to highlight colored output in the console. When running on Linux no extra setup is performed; otherwise the file imports the colorama library and calls init() to initialize it. Finally, through a series of conditional statements, the file rebinds all the colored-output function names to the plain print function so that colors do not corrupt output written to files.
-
-## [2/48] Please give an overview of the following program file: config.py
-
-This program file handles configuration and parameter settings. It contains many settings such as the API key, proxy usage, thread count, default model, and timeouts, as well as some advanced features such as URL redirection. These settings affect the program's behavior and performance.
-
-## [3/48] Please give an overview of the following program file: config_private.py
-
-This program file is a Python script named config_private.py. It assigns the following variables:
-
-1. API_KEY: the API key.
-2. USE_PROXY: whether to use a proxy.
-3. proxies: if a proxy is used, the proxy protocol (socks5/http), address (localhost), and port (11284).
-4. DEFAULT_WORKER_NUM: the default number of worker threads.
-5. SLACK_CLAUDE_BOT_ID: the Slack bot ID.
-6. SLACK_CLAUDE_USER_TOKEN: the Slack user token.
-
-## [4/48] Please give an overview of the following program file: core_functional.py
-
-This source file, core_functional.py, defines a function named get_core_functions() that returns a dictionary describing various academic translation and polishing tasks together with their parameters, such as color, prefix, and suffix. The tasks include English academic polishing, Chinese academic polishing, grammar checking, Chinese-to-English translation, academic Chinese-English translation, English-to-Chinese translation, finding images, and converting references to BibTeX. Some tasks also define preprocessing functions for their input text.
-
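-A minimal sketch of the kind of dictionary this could return (the key names and entries here are assumptions based on the description above, not a copy of the real file):
-
-```python
-def get_core_functions():
-    return {
-        "English academic polishing": {
-            "Prefix": "Polish the following academic text, improving grammar and clarity:\n\n",
-            "Suffix": "",
-            "Color": "secondary",  # hint for the UI button color
-        },
-        "Chinese to English": {
-            "Prefix": "Translate the following text into academic English:\n\n",
-            "Suffix": "",
-        },
-    }
-```
-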
-## [5/48] Please give an overview of the following program file: crazy_functional.py
-
-This program file (crazy_functional.py) is a collection of function plugins; it defines and registers several of them. The plugins provide advanced features such as parsing project source code, batch-translating PDF documents, and polishing full LaTeX projects. Some plugins support hot reloading and take effect without restarting the program. The plugins are grouped by functionality (a first group and a second group) and are exposed in different ways (as buttons or as dropdown entries).
-
-## [6/48] Please give an overview of the following program file: main.py
-
-This is a Python program file named main.py. It contains a function named main that runs automatically. The program requires gradio, os, and other modules to be installed, and loads the proxy, model, API key, and other information from the configuration file. It provides a chatbot with a conversational interface where the user types a question and the chatbot answers it or offers related features. It also includes a basic-functions area, a function-plugin area, controls for switching the model / SysPrompt / interface layout, and a secondary input area, where the user can select features and plugins. Auxiliary modules such as logging are also used.
-
-## [7/48] Please give an overview of the following program file: multi_language.py
-
-multi_language.py is the program used to translate the project into different languages. It contains the following functions and variables: lru_file_cache, contains_chinese, split_list, map_to_json, read_map_from_json, advanced_split, trans, trans_json, step_1_core_key_translate, CACHE_FOLDER, blacklist, LANG, TransPrompt, cached_translation, and so on. Comments and docstrings explain how to use the program, for example how to modify the "LANG" and "TransPrompt" variables.
-
-## [8/48] Please give an overview of the following program file: theme.py
-
-This is a Python source file named theme.py. It defines a function adjust_theme that customizes the theme of the gradio application, adjusting colors, fonts, shadows, and so on, and adds a mascot widget if allowed. The file also defines the variable advanced_css, which contains CSS styles for code highlighting and for customizing the chat box, and it imports the get_conf function and the gradio library.
-
-## [9/48] Please give an overview of the following program file: toolbox.py
-
-toolbox.py is a utility library consisting mainly of function decorators and small helper functions that support the chatbot's features, including text processing, plugin loading, exception detection, Markdown conversion, and file I/O. The library also contains some dependency and configuration information, and it is easy to understand and maintain.
-
-## [10/48] Please give an overview of the following program file: crazy_functions\crazy_functions_test.py
-
-This file is a Python test module for the plugins in crazy_functions. The tested plugins include: parsing a Python project's source code, parsing a C++ project's source code, LaTeX full-text polishing, Markdown Chinese-to-English translation, batch PDF translation, the Google Scholar helper, Word-document summarization, downloading an arxiv paper and translating its abstract, web-connected question answering, and parsing Jupyter Notebooks. Each plugin has a corresponding test function.
-
-## [11/48] Please give an overview of the following program file: crazy_functions\crazy_utils.py
-
-This Python file contains two functions:
-
-1. `input_clipping`: clips the length of the input text so that it stays under a given limit.
-2. `request_gpt_model_in_new_thread_with_ui_alive`: requests the GPT model while keeping the user interface responsive, with multithreading and live UI updates.
-
-Both functions rely on helper functions imported from `toolbox` and `request_llm`, and their inputs and outputs are documented in detail.
-
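-A minimal sketch of what a clipping helper like this could look like (the signature and token counting are assumptions, not the project's actual implementation):
-
-```python
-def input_clipping(inputs, history, max_token_limit, count_tokens=len):
-    # Drop the oldest history turns first, then hard-truncate the prompt as a last resort,
-    # until the combined size fits under max_token_limit. count_tokens defaults to a
-    # plain character count here; a real tokenizer could be passed in instead.
-    while history and count_tokens(inputs) + sum(count_tokens(h) for h in history) > max_token_limit:
-        history.pop(0)
-    if count_tokens(inputs) > max_token_limit:
-        inputs = inputs[:max_token_limit]
-    return inputs, history
-```
-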
-## [12/48] Please give an overview of the following program file: crazy_functions\Latex全文润色.py
-
-This is a Python program file, crazy_functions\Latex全文润色.py. It contains a PaperFileGroup class and three functions: Latex英文润色, Latex中文润色, and Latex英文纠错. The program uses string processing, regular expressions, file I/O, multithreading, and related techniques; its main purpose is to polish and proofread an entire LaTeX project, improving grammar, clarity, and overall readability. It also relies on third-party libraries and wraps some utility functions.
-
-## [13/48] Please give an overview of the following program file: crazy_functions\Latex全文翻译.py
-
-This file contains two functions, `Latex英译中` and `Latex中译英`, both of which translate an entire LaTeX project. It also contains a class `PaperFileGroup` with a method `run_file_split` that splits long text files into several shorter ones. It uses helper functions from the `toolbox` library and imports `model_info` from `request_llm`. The remaining functions read the files in, strip their comments, split them, and translate them. The file also includes some exception handling and UI-update logic.
-
-## [14/48] Please give an overview of the following program file: crazy_functions\__init__.py
-
-This is the initialization file (__init__.py) of a Python module named "crazy_functions". The package contains the various "crazy" functions, but this file does not implement them; it only allows the other Python modules that implement them to be imported as a package. It defines no classes or functions; its sole purpose is to mark the "crazy_functions" module as a package.
-
-## [15/48] Please give an overview of the following program file: crazy_functions\下载arxiv论文翻译摘要.py
-
-This is a Python program file named `下载arxiv论文翻译摘要.py`. It contains several functions; `下载arxiv论文并翻译摘要` downloads the PDF of an `arxiv` paper, extracts the abstract, and translates it with GPT. Other functions include `download_arxiv_` for downloading `arxiv` papers and `get_name` for fetching article information, using third-party libraries such as requests and BeautifulSoup. The file also contains some code for debugging and for storing files.
-
-## [16/48] Please give an overview of the following program file: crazy_functions\代码重写为全英文_多线程.py
-
-This is a multithreaded program whose main job is to convert the Chinese content of all Python source files under a given directory into English and store the converted code in new files. It uses GPT-3-class models for the Chinese-to-English conversion and applies token-limit handling to keep the program from failing. While running it prints progress messages and stores every converted file under the target directory; when it finishes, it generates a task report recording the run in detail.
-
-## [17/48] Please give an overview of the following program file: crazy_functions\图片生成.py
-
-This program file provides a function `图片生成` for generating images. The function calls `gen_image` to generate an image and returns the image URL and the local file path. It takes several parameters, including `prompt` (the prompt text), `llm_kwargs` (GPT model parameters), and `plugin_kwargs` (plugin parameters). The core code uses the `requests` library to ask the OpenAI API for an image and does some simple processing and saving. The function also updates the interface, clearing the chat history and showing a message that the image is being generated, followed by the final image URL and a preview.
-
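-A minimal sketch of such a request against the OpenAI images endpoint (the real plugin also routes the call through the configured proxy and saves a local copy, which is omitted here):
-
-```python
-import requests
-
-def gen_image(api_key, prompt, resolution="512x512"):
-    # Ask the OpenAI image-generation endpoint for one image and return its URL.
-    response = requests.post(
-        "https://api.openai.com/v1/images/generations",
-        headers={"Authorization": f"Bearer {api_key}"},
-        json={"prompt": prompt, "n": 1, "size": resolution},
-        timeout=60,
-    )
-    response.raise_for_status()
-    return response.json()["data"][0]["url"]
-```
-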
-## [18/48] Please give an overview of the following program file: crazy_functions\对话历史存档.py
-
-This Python program file, crazy_functions\对话历史存档.py, contains four functions:
-
-1. write_chat_to_file(chatbot, history=None, file_name=None): writes the conversation record to a file in Markdown format, generating a file name from the current time if none is given, and prints the file path when done.
-
-2. gen_file_preview(file_name): reads the given file, parses the conversation history in it, and returns the first 100 characters as a file preview.
-
-3. read_file_to_chat(chatbot, history, file_name): reads the given file, parses the conversation history in it, and updates the chat display.
-
-4. 对话历史存档(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): the main function; it saves the current conversation record and notifies the user. If the user wants to load a saved history, it calls read_file_to_chat() to refresh the chat display; if the user wants to delete the histories, it calls 删除所有本地对话历史记录() to remove them.
-
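-For illustration, the archiving helper could be sketched roughly as follows (the log directory and Markdown layout are assumptions, not the project's exact format):
-
-```python
-import os
-import time
-
-def write_chat_to_file(chatbot, history=None, file_name=None):
-    # Dump each (question, answer) pair as a small Markdown section and return the path.
-    if file_name is None:
-        file_name = time.strftime("chat-%Y-%m-%d-%H-%M-%S.md")
-    os.makedirs("gpt_log", exist_ok=True)
-    path = os.path.join("gpt_log", file_name)
-    with open(path, "w", encoding="utf-8") as f:
-        for question, answer in chatbot:
-            f.write(f"## {question}\n\n{answer}\n\n")
-    print(f"Conversation written to {path}")
-    return path
-```
-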
-## [19/48] Please give an overview of the following program file: crazy_functions\总结word文档.py
-
-This program file implements Word-document summarization. It uses Python's docx library to read docx files and the pywin32 library to read doc files. The program first searches for the files to process based on the txt argument, parses each one, splits its content into fragments of a given length, and then uses request_gpt_model_in_new_thread_with_ui_alive from another program file to produce a Chinese summary. Finally, all the summaries are written to one file and displayed in the interface.
-
-## [20/48] Please give an overview of the following program file: crazy_functions\总结音视频.py
-
-This program file contains two functions, split_audio_file() and AnalyAudio(), plus the necessary imports and a few helper functions. split_audio_file splits an audio file into several segments of equal duration and returns a list of the segment file paths; AnalyAudio analyzes an audio file by transcribing it with the whisper model and summarizing the content with a GPT model, and writes all the summaries to a results file.
-
-## [21/48] Please give an overview of the following program file: crazy_functions\批量Markdown翻译.py
-
-This program file, `批量Markdown翻译.py`, reads Markdown files, separates the long text, translates the Markdown (English to Chinese and Chinese to English), collects the results, and exits. It uses multithreading for efficiency and depends on the `tiktoken` library, which may need to be installed separately. The file also contains a few other functions and classes unrelated to the feature its name describes.
-
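-Splitting long text by token count, as described above, could look roughly like this (a sketch only; the chunking strategy of the real plugin may differ):
-
-```python
-import tiktoken
-
-def split_by_token_budget(text, limit=2048, encoding_name="cl100k_base"):
-    # Cut the document at paragraph boundaries so that each chunk stays under `limit` tokens.
-    enc = tiktoken.get_encoding(encoding_name)
-    chunks, current = [], ""
-    for paragraph in text.split("\n\n"):
-        candidate = current + "\n\n" + paragraph if current else paragraph
-        if current and len(enc.encode(candidate)) > limit:
-            chunks.append(current)
-            current = paragraph
-        else:
-            current = candidate
-    if current:
-        chunks.append(current)
-    return chunks
-```
-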
-## [22/48] Please give an overview of the following program file: crazy_functions\批量总结PDF文档.py
-
-This file is a Python script, crazy_functions\批量总结PDF文档.py. After importing a set of libraries and utility functions, it defines five functions, including an error-handling decorator (@CatchException), for batch-summarizing PDF documents. The main function parses the PDF documents and calls the model to generate Chinese and English summaries.
-
-## [23/48] Please give an overview of the following program file: crazy_functions\批量总结PDF文档pdfminer.py
-
-This program file is a function plugin for batch-summarizing PDF documents. It uses the pdfminer plugin and the BeautifulSoup library to extract the text of the PDF documents, processes each PDF separately, and generates Chinese and English summaries. It also includes some helper functions and an exception-handling decorator.
-
-## [24/48] Please give an overview of the following program file: crazy_functions\批量翻译PDF文档_多线程.py
-
-This program file is a Python script named "批量翻译PDF文档_多线程.py". It mainly uses "toolbox", "request_gpt_model_in_new_thread_with_ui_alive", "request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency", "colorful", and some functions from the custom "crazy_utils" module. The program implements batch PDF translation: it automatically parses the basic information in the PDF files, recursively splits them, translates and processes all the content of the papers, and produces the corresponding translation output files (both md and html). The feature is fairly complex, calling many functions and dependencies and involving multithreading and UI updates. The file has detailed comments and clear variable names, so the code is easy to read.
-
-## [25/48] Please give an overview of the following program file: crazy_functions\理解PDF文档内容.py
-
-This program file implements a function named "理解PDF文档内容" (understand PDF content), which extracts the abstract and the main points of each section of an input PDF file and answers academic questions in context during extraction. The function depends on several helper functions and third-party libraries and handles the exceptions that may occur during execution.
-
-## [26/48] Please give an overview of the following program file: crazy_functions\生成函数注释.py
-
-This program file is a Python module named "生成函数注释.py". It defines two functions: the main function "生成函数注释", which generates function comments, and "批量生成函数注释", which adds exception catching through a decorator. The file depends on "toolbox" and the local "crazy_utils" module, and at runtime it uses multithreading and a GPT model to generate the comments. The generated comments are output as a Markdown table and written to the history file.
-
-## [27/48] Please give an overview of the following program file: crazy_functions\联网的ChatGPT.py
-
-This is a Python program file named `联网的ChatGPT.py`, which defines a function `连接网络回答问题`. The function answers a given question by scraping search-engine results and visiting web pages, composing the final answer with the ChatGPT model. The file also includes some helper functions, for example for extracting text from web pages and for accessing pages through a proxy.
-
-## [28/48] Please give an overview of the following program file: crazy_functions\解析JupyterNotebook.py
-
-This program file contains two functions, `parseNotebook()` and `解析ipynb文件()`, along with some imported helper functions and classes. `parseNotebook()` parses a Jupyter Notebook file into text code blocks, while `解析ipynb文件()` parses multiple Jupyter Notebook files, calling `parseNotebook()` on each plus some additional processing. The functions handle input and output with multithreading and write the results to a file.
-
-## [29/48] Please give an overview of the following program file: crazy_functions\解析项目源代码.py
-
-This is a Python file for source-code analysis. It defines several functions, including ones for parsing a Python project, a C project, a C project's header files, and a Java project. The function 解析源代码新 does the actual analysis and report generation: it reads each source file that is passed in, builds the corresponding request content, sends the requests to chatgpt for analysis over multiple threads, writes the results to a file, and produces an aggregate analysis. Finally it calls update_ui to refresh the interface, completing the source-code analysis.
-
-## [30/48] Please give an overview of the following program file: crazy_functions\询问多个大语言模型.py
-
-This program file contains two functions, 同时问询() and 同时问询_指定模型(), which send the user input to multiple large language models at the same time and return each model's reply. 同时问询() uses the ChatGPT and ChatGLM models by default, while 同时问询_指定模型() lets the caller specify which models to use. The file also references other modules and libraries.
-
-## [31/48] Please give an overview of the following program file: crazy_functions\读文章写摘要.py
-
-This program file is a Python module, crazy_functions\读文章写摘要.py. It contains two functions. The main one, "读文章写摘要", parses the tex files in a given folder, generates a summary of each file's content, and then produces a full-text abstract from the summaries of the individual fragments. The second, "解析Paper", parses a single paper file. The module uses helper functions and libraries such as update_ui, CatchException, report_execption, and write_results_to_file.
-
-## [32/48] Please give an overview of the following program file: crazy_functions\谷歌检索小助手.py
-
-This file is a Python module named "谷歌检索小助手.py". It contains two functions: "get_meta_information()", which extracts the metadata of all the related academic articles from a given URL, and the main function "谷歌检索小助手()", which analyzes the articles that appear on a user-supplied Google Scholar search page and extracts the relevant information. "谷歌检索小助手()" depends on "get_meta_information()" and calls other Python modules such as "arxiv", "math", and "bs4".
-
-## [33/48] Please give an overview of the following program file: crazy_functions\高级功能函数模板.py
-
-This program file defines a function named 高阶功能模板函数, which takes several parameters, including the input text, GPT model parameters, plugin parameters, the chat display handle, and the chat history, and sends requests that return related images through the Unsplash API. To avoid overflowing the input, the function clears the history at the start, and it contains a few UI-update statements. The file also depends on two other modules, CatchException and update_ui, and on request_gpt_model_in_new_thread_with_ui_alive from the crazy_utils module (presumably a custom utility package).
-
-## [34/48] Please give an overview of the following program file: request_llm\bridge_all.py
-
-This file contains two functions, predict and predict_no_ui_long_connection, for running conversations over different LLM models. It also contains a lazyloadTiktoken class and an LLM_CATCH_EXCEPTION decorator: lazyloadTiktoken lazily loads a model's tokenizer, and LLM_CATCH_EXCEPTION handles errors. The file also defines some global variables and a model-information dictionary used to reference and configure the LLM models.
-
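-The lazy-loading idea could be sketched as follows (the names follow the summary above; the details are assumptions rather than the file's actual code):
-
-```python
-from functools import lru_cache
-
-import tiktoken
-
-class lazyloadTiktoken:
-    # Defer the (slow) tokenizer download/initialization until the first encode/decode call.
-    def __init__(self, model):
-        self.model = model
-
-    @staticmethod
-    @lru_cache(maxsize=128)
-    def get_encoder(model):
-        return tiktoken.encoding_for_model(model)
-
-    def encode(self, *args, **kwargs):
-        return self.get_encoder(self.model).encode(*args, **kwargs)
-
-    def decode(self, *args, **kwargs):
-        return self.get_encoder(self.model).decode(*args, **kwargs)
-```
-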
-## [35/48] Please give an overview of the following program file: request_llm\bridge_chatglm.py
-
-This is a Python program file named `bridge_chatglm.py`, which defines a class `GetGLMHandle` and three methods: `predict_no_ui_long_connection`, `predict`, and `stream_chat`. The file depends on several Python libraries such as `transformers` and `sentencepiece`. It implements a chatbot that generates replies with the ChatGLM model, supporting single- and multi-threaded use. At startup the program has to load the ChatGLM model and tokenizer, which takes some time. The settings in the `config.py` configuration file affect memory and VRAM usage, so the program may freeze low-spec machines.
-
-## [36/48] Please give an overview of the following program file: request_llm\bridge_chatgpt.py
-
-This file is a Python code file, request_llm\bridge_chatgpt.py. It mainly provides three functions, predict, predict_no_ui, and predict_no_ui_long_connection, which send requests to chatGPT, wait for the reply, and obtain the output. The file also contains some helper functions for handling connection errors, building HTTP requests, and so on. The code is cleanly structured and uses several custom functions and modules.
-
-## [37/48] Please give an overview of the following program file: request_llm\bridge_jittorllms_llama.py
-
-This code file implements a chatbot that uses a JittorLLMs model. Its main parts are:
-1. The GetGLMHandle class: a process class that loads the JittorLLMs model and receives and handles requests.
-2. The predict_no_ui_long_connection function: a multithreaded method for running the chatbot in the background.
-3. The predict function: a single-threaded method for interactive use from the front-end page, taking the user's input and returning the corresponding reply.
-
-The file also contains some helper functions and module-level globals such as importlib, time, and threading.
-
-## [38/48] Please give an overview of the following program file: request_llm\bridge_jittorllms_pangualpha.py
-
-This file implements chat using jittorllms (a machine-learning model family), including model loading, parameter loading, and message exchange. It uses multiprocessing and multithreading to improve performance and efficiency, and the code also includes dependency-handling and preprocessing functions.
-
-## [39/48] Please give an overview of the following program file: request_llm\bridge_jittorllms_rwkv.py
-
-This file is a Python program, request_llm\bridge_jittorllms_rwkv.py. It depends on transformers, time, threading, importlib, multiprocessing, and other libraries. In the file, the GetGLMHandle class loads the jittorllms model parameters and defines a stream_chat method to interact with the jittorllms model, while the predict_no_ui_long_connection and predict methods handle the history, call the jittorllms model, receive the reply, and return the result.
-
-## [40/48] Please give an overview of the following program file: request_llm\bridge_moss.py
-
-This file is a Python source file, request_llm\bridge_moss.py. The code defines a GetGLMHandle class and two functions, predict_no_ui_long_connection and predict.
-
-The GetGLMHandle class inherits from the Process class (multiprocessing); its main job is to start a child process that loads the MOSS model parameters and to communicate between the parent and child processes through a Pipe. The class also defines check_dependency, moss_init, run, and stream_chat methods: check_dependency and moss_init initialize the child process, run is the child process's main method, and stream_chat implements the interaction between parent and child.
-
-The predict_no_ui_long_connection function is the multithreaded method: it uses the GetGLMHandle class to load the MOSS parameters and then uses stream_chat for the parent-child interaction.
-
-The predict function is the single-threaded method: it calls update_ui to stream MOSS's replies into the UI (user interface) in real time and runs a named function (additional_fn) to preprocess the input.
-
-## [41/48] Please give an overview of the following program file: request_llm\bridge_newbing.py
-
-This program file, `bridge_newbing.py`, has three parts:
-
-The first part imports the `NewbingChatbot` class from the `edge_gpt` module with a from statement.
-
-The second part defines `NewBingHandle`, a subclass of the process class, which checks dependencies and starts the process. This part also defines a multithreaded method, `predict_no_ui_long_connection`, and a single-threaded method, `predict`, for communicating with NewBing.
-
-The third part defines a global variable `newbing_handle` and exports the `predict_no_ui_long_connection` and `predict` methods so that other programs can call them.
-
-## [42/48] Please give an overview of the following program file: request_llm\bridge_newbingfree.py
-
-This Python file has three parts. The first is the chatbot program taken from edge_gpt_free.py. The second is the Worker child process used to drive it. The third provides two functions, predict_no_ui_long_connection and predict, for calling the NewBing chatbot and returning the response; predict also takes some parameters controlling the chatbot's replies and the UI updates.
-
-## [43/48] Please give an overview of the following program file: request_llm\bridge_stackclaude.py
-
-This is a Python source file, request_llm\bridge_stackclaude.py. The code has three main parts:
-
-The first part defines a Slack API Client class that sends, receives, and continuously listens for Slack messages, handling the interaction with the Slack API.
-
-The second part defines the ClaudeHandle class, which inherits from the Process class; it creates the Worker child process and drives it, implementing the interaction between Claude and the user.
-
-The third part defines the predict_no_ui_long_connection and predict functions, which obtain Claude's reply mainly by calling the stream_chat method of a ClaudeHandle object and update the UI to show the related information. predict uses the single-threaded approach, while predict_no_ui_long_connection uses the multithreaded one.
-
-## [44/48] Please give an overview of the following program file: request_llm\bridge_tgui.py
-
-This file is a Python code file named request_llm\bridge_tgui.py. It contains functions for interacting with the chatbot UI and for completing text-generation tasks by communicating with a remote LLM model over the WebSocket protocol; the most important are predict() and predict_no_ui_long_connection(). The program also has other helper functions, such as random_hash(). The whole code file has gone through one round of revision on a collaborative basis.
-
-## [45/48] Please give an overview of the following program file: request_llm\edge_gpt.py
-
-This file is a Python program for calling the Bing chatbot API. It is built from several classes and helper functions, can ask questions within a given conversation, and communicates with the remote service over websockets. The program implements a chatbot that provides AI chat to the user.
-
-## [46/48] Please give an overview of the following program file: request_llm\edge_gpt_free.py
-
-This code file is a conversation API that sends messages through Chathub and returns the response. It uses the aiohttp and httpx libraries for sending network requests. The code contains a number of functions and constants, mostly used to build the request payload and headers, and it also contains a Conversation class through which the interactive dialogue is carried out.
-
-## [47/48] Please give an overview of the following program file: request_llm\test_llms.py
-
-This file is a Python program for unit-testing the llm models. The program imports a module named "request_llm.bridge_newbingfree" and then calls its predict_no_ui_long_connection() function three times, printing the results. There are also some commented-out code blocks, likewise about model prediction.
-
-## Briefly describe the functions of the following files in one Markdown table:
-check_proxy.py, colorful.py, config.py, config_private.py, core_functional.py, crazy_functional.py, main.py, multi_language.py, theme.py, toolbox.py, crazy_functions\crazy_functions_test.py, crazy_functions\crazy_utils.py, crazy_functions\Latex全文润色.py, crazy_functions\Latex全文翻译.py, crazy_functions\__init__.py, crazy_functions\下载arxiv论文翻译摘要.py. Based on the analysis above, summarize the program's overall functionality in one sentence.
-
-| File | Description |
-| ------ | ------ |
-| check_proxy.py | Checks proxy validity and geolocation |
-| colorful.py | Prints colored text to the console |
-| config.py | Configuration and parameter settings |
-| config_private.py | Private configuration and parameter settings |
-| core_functional.py | Core functions and parameter settings |
-| crazy_functional.py | Collection of advanced function plugins |
-| main.py | A chatbot program offering academic translation, text processing, and other query services |
-| multi_language.py | Detects and translates different languages |
-| theme.py | Customizes the gradio application theme |
-| toolbox.py | Utility library that supports the various features |
-| crazy_functions\crazy_functions_test.py | Tests the various functions in crazy_functions |
-| crazy_functions\crazy_utils.py | Utility functions for string handling, exception detection, Markdown conversion, etc. |
-| crazy_functions\Latex全文润色.py | Polishes and proofreads an entire LaTeX project |
-| crazy_functions\Latex全文翻译.py | Translates an entire LaTeX project |
-| crazy_functions\__init__.py | Module init file marking `crazy_functions` as a package |
-| crazy_functions\下载arxiv论文翻译摘要.py | Downloads the PDF of an `arxiv` paper, extracts the abstract, and translates it |
-
-These source files provide the basic text- and language-processing functionality, utility functions, and advanced plugins that allow the chatbot to handle all kinds of complex academic text tasks, including polishing, translation, search, download, and parsing.
-
-## Briefly describe the functions of the following files in one Markdown table:
-crazy_functions\代码重写为全英文_多线程.py, crazy_functions\图片生成.py, crazy_functions\对话历史存档.py, crazy_functions\总结word文档.py, crazy_functions\总结音视频.py, crazy_functions\批量Markdown翻译.py, crazy_functions\批量总结PDF文档.py, crazy_functions\批量总结PDF文档pdfminer.py, crazy_functions\批量翻译PDF文档_多线程.py, crazy_functions\理解PDF文档内容.py, crazy_functions\生成函数注释.py, crazy_functions\联网的ChatGPT.py, crazy_functions\解析JupyterNotebook.py, crazy_functions\解析项目源代码.py, crazy_functions\询问多个大语言模型.py, crazy_functions\读文章写摘要.py. Based on the analysis above, summarize the program's overall functionality in one sentence.
-
-| File | Brief description |
-| --- | --- |
-| 代码重写为全英文_多线程.py | Converts the Chinese content of Python source files into English |
-| 图片生成.py | Generates images from a prompt text using a GPT model |
-| 对话历史存档.py | Writes each conversation record to a Markdown-format file |
-| 总结word文档.py | Generates summaries of input Word documents |
-| 总结音视频.py | Generates summaries of input audio/video files |
-| 批量Markdown翻译.py | Translates the Markdown files under a given directory between Chinese and English |
-| 批量总结PDF文档.py | Splits PDF files and generates summaries |
-| 批量总结PDF文档pdfminer.py | Extracts the text of PDF files and generates summaries |
-| 批量翻译PDF文档_多线程.py | Translates the PDF files under a given directory between Chinese and English |
-| 理解PDF文档内容.py | Summarizes PDF files and answers questions about them |
-| 生成函数注释.py | Automatically generates comments for Python functions |
-| 联网的ChatGPT.py | Answers chat queries using a web crawler together with the ChatGPT model |
-| 解析JupyterNotebook.py | Parses the code in Jupyter Notebooks |
-| 解析项目源代码.py | Parses source code in a specified programming language |
-| 询问多个大语言模型.py | Processes input and replies using multiple large language models |
-| 读文章写摘要.py | Parses papers and generates full-text abstracts |
-
-Overall functionality in one sentence: the program provides a suite of features for processing text, files, and code, using various language models, multithreading, network requests, and data-parsing techniques to improve efficiency and accuracy.
-
-## Briefly describe the functions of the following files in one Markdown table:
-crazy_functions\谷歌检索小助手.py, crazy_functions\高级功能函数模板.py, request_llm\bridge_all.py, request_llm\bridge_chatglm.py, request_llm\bridge_chatgpt.py, request_llm\bridge_jittorllms_llama.py, request_llm\bridge_jittorllms_pangualpha.py, request_llm\bridge_jittorllms_rwkv.py, request_llm\bridge_moss.py, request_llm\bridge_newbing.py, request_llm\bridge_newbingfree.py, request_llm\bridge_stackclaude.py, request_llm\bridge_tgui.py, request_llm\edge_gpt.py, request_llm\edge_gpt_free.py, request_llm\test_llms.py. Based on the analysis above, summarize the program's overall functionality in one sentence.
-
-| File | Description |
-| --- | --- |
-| crazy_functions\谷歌检索小助手.py | Provides metadata for the articles on a Google Scholar search page. |
-| crazy_functions\高级功能函数模板.py | Replies to user input with related images via the Unsplash API. |
-| request_llm\bridge_all.py | Runs conversations over different LLM models. |
-| request_llm\bridge_chatglm.py | Generates replies with the ChatGLM model, supporting single- and multi-threaded use. |
-| request_llm\bridge_chatgpt.py | Handles conversations based on the GPT model. |
-| request_llm\bridge_jittorllms_llama.py | Handles conversations with JittorLLMs models, supporting single- and multi-threaded use. |
-| request_llm\bridge_jittorllms_pangualpha.py | Handles conversations with JittorLLMs models using multiprocessing and multithreading. |
-| request_llm\bridge_jittorllms_rwkv.py | Provides chat via JittorLLMs models, with options such as history handling and parameter tuning. |
-| request_llm\bridge_moss.py | Loads the MOSS model for conversation. |
-| request_llm\bridge_newbing.py | Chats through the NewBing chatbot, supporting single- and multi-threaded use. |
-| request_llm\bridge_newbingfree.py | Implements chatbot text generation on top of the Bing chatbot API. |
-| request_llm\bridge_stackclaude.py | Implements Claude-user interaction via the Slack API. |
-| request_llm\bridge_tgui.py | Connects the chatbot to the UI over WebSocket. |
-| request_llm\edge_gpt.py | Calls the Bing chatbot API to provide chatbot services. |
-| request_llm\edge_gpt_free.py | Implements a chatbot API using the aiohttp and httpx libraries. |
-| request_llm\test_llms.py | Unit tests for the llm models. |
-| Overall functionality | Implements several kinds of chatbots that generate text from the given input. |
diff --git a/spaces/davidrd123/WikiArt_20genre/README.md b/spaces/davidrd123/WikiArt_20genre/README.md
deleted file mode 100644
index 10796c125a62d52326797418925c7d3f13e246e6..0000000000000000000000000000000000000000
--- a/spaces/davidrd123/WikiArt_20genre/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: WikiArt_18genre
-emoji: 💻
-colorFrom: gray
-colorTo: gray
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/davidtsong/whisper-demo/README.md b/spaces/davidtsong/whisper-demo/README.md
deleted file mode 100644
index 376c39d878607293693aa1bb328612dc211b23d4..0000000000000000000000000000000000000000
--- a/spaces/davidtsong/whisper-demo/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Whisper Demo
-emoji: 📈
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.3.1
-app_file: app.py
-pinned: false
----
-
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/__init__.py
deleted file mode 100644
index db7a0c42171b690af349971cc8efcaa30f8ccef5..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/__init__.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import pkgutil
-
-import gradio.components as components
-import gradio.inputs as inputs
-import gradio.outputs as outputs
-import gradio.processing_utils
-import gradio.templates
-import gradio.themes as themes
-from gradio.blocks import Blocks
-from gradio.chat_interface import ChatInterface
-from gradio.components import (
- HTML,
- JSON,
- AnnotatedImage,
- Annotatedimage,
- Audio,
- BarPlot,
- Button,
- Carousel,
- Chatbot,
- Checkbox,
- CheckboxGroup,
- Checkboxgroup,
- ClearButton,
- Code,
- ColorPicker,
- DataFrame,
- Dataframe,
- Dataset,
- Dropdown,
- DuplicateButton,
- File,
- Gallery,
- Highlight,
- HighlightedText,
- Highlightedtext,
- Image,
- Interpretation,
- Json,
- Label,
- LinePlot,
- LoginButton,
- LogoutButton,
- Markdown,
- Model3D,
- Number,
- Plot,
- Radio,
- ScatterPlot,
- Slider,
- State,
- StatusTracker,
- Text,
- Textbox,
- TimeSeries,
- Timeseries,
- UploadButton,
- Variable,
- Video,
- component,
-)
-from gradio.deploy_space import deploy
-from gradio.events import SelectData
-from gradio.exceptions import Error
-from gradio.external import load
-from gradio.flagging import (
- CSVLogger,
- FlaggingCallback,
- HuggingFaceDatasetJSONSaver,
- HuggingFaceDatasetSaver,
- SimpleCSVLogger,
-)
-from gradio.helpers import (
- EventData,
- Info,
- Progress,
- Warning,
- make_waveform,
- skip,
- update,
-)
-from gradio.helpers import create_examples as Examples # noqa: N812
-from gradio.interface import Interface, TabbedInterface, close_all
-from gradio.ipython_ext import load_ipython_extension
-from gradio.layouts import Accordion, Box, Column, Group, Row, Tab, TabItem, Tabs
-from gradio.mix import Parallel, Series
-from gradio.oauth import OAuthProfile
-from gradio.routes import Request, mount_gradio_app
-from gradio.templates import (
- Files,
- ImageMask,
- ImagePaint,
- List,
- Matrix,
- Mic,
- Microphone,
- Numpy,
- Paint,
- Pil,
- PlayableVideo,
- Sketchpad,
- TextArea,
- Webcam,
-)
-from gradio.themes import Base as Theme
-
-current_pkg_version = (
- (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip()
-)
-__version__ = current_pkg_version
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/annotated_image.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/annotated_image.py
deleted file mode 100644
index acb137566dc054a7f6c7c6c9e2734f80960c7db5..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/components/annotated_image.py
+++ /dev/null
@@ -1,243 +0,0 @@
-"""gr.AnnotatedImage() component."""
-
-from __future__ import annotations
-
-from typing import Literal
-
-import numpy as np
-from gradio_client.documentation import document, set_documentation_group
-from gradio_client.serializing import JSONSerializable
-from PIL import Image as _Image # using _ to minimize namespace pollution
-
-from gradio import utils
-from gradio.components.base import IOComponent, _Keywords
-from gradio.deprecation import warn_style_method_deprecation
-from gradio.events import (
- EventListenerMethod,
- Selectable,
-)
-
-set_documentation_group("component")
-
-_Image.init() # fixes https://github.com/gradio-app/gradio/issues/2843
-
-
-@document()
-class AnnotatedImage(Selectable, IOComponent, JSONSerializable):
- """
- Displays a base image and colored subsections on top of that image. Subsections can take the form of rectangles (e.g. object detection) or masks (e.g. image segmentation).
- Preprocessing: this component does *not* accept input.
- Postprocessing: expects a {Tuple[numpy.ndarray | PIL.Image | str, List[Tuple[numpy.ndarray | Tuple[int, int, int, int], str]]]} consisting of a base image and a list of subsections, that are either (x1, y1, x2, y2) tuples identifying object boundaries, or 0-1 confidence masks of the same shape as the image. A label is provided for each subsection.
-
- Demos: image_segmentation
- """
-
- def __init__(
- self,
- value: tuple[
- np.ndarray | _Image.Image | str,
- list[tuple[np.ndarray | tuple[int, int, int, int], str]],
- ]
- | None = None,
- *,
- show_legend: bool = True,
- height: int | None = None,
- width: int | None = None,
- color_map: dict[str, str] | None = None,
- label: str | None = None,
- every: float | None = None,
- show_label: bool | None = None,
- container: bool = True,
- scale: int | None = None,
- min_width: int = 160,
- visible: bool = True,
- elem_id: str | None = None,
- elem_classes: list[str] | str | None = None,
- **kwargs,
- ):
- """
- Parameters:
- value: Tuple of base image and list of (subsection, label) pairs.
- show_legend: If True, will show a legend of the subsections.
- height: Height of the displayed image.
- width: Width of the displayed image.
- color_map: A dictionary mapping labels to colors. The colors must be specified as hex codes.
- label: component name in interface.
- every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
- show_label: if True, will display label.
- container: If True, will place the component in a container - providing some extra padding around the border.
- scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
- min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
- visible: If False, component will be hidden.
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
- elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
- """
- self.show_legend = show_legend
- self.height = height
- self.width = width
- self.color_map = color_map
- self.select: EventListenerMethod
- """
- Event listener for when the user selects Image subsection.
- Uses event data gradio.SelectData to carry `value` referring to selected subsection label, and `index` to refer to subsection index.
- See EventData documentation on how to use this event data.
- """
- IOComponent.__init__(
- self,
- label=label,
- every=every,
- show_label=show_label,
- container=container,
- scale=scale,
- min_width=min_width,
- visible=visible,
- elem_id=elem_id,
- elem_classes=elem_classes,
- value=value,
- **kwargs,
- )
-
- def get_config(self):
- return {
- "show_legend": self.show_legend,
- "value": self.value,
- "height": self.height,
- "width": self.width,
- "color_map": self.color_map,
- "selectable": self.selectable,
- **IOComponent.get_config(self),
- }
-
- @staticmethod
- def update(
- value: tuple[
- np.ndarray | _Image.Image | str,
- list[tuple[np.ndarray | tuple[int, int, int, int], str]],
- ]
- | Literal[_Keywords.NO_VALUE] = _Keywords.NO_VALUE,
- show_legend: bool | None = None,
- height: int | None = None,
- width: int | None = None,
- color_map: dict[str, str] | None = None,
- label: str | None = None,
- show_label: bool | None = None,
- container: bool | None = None,
- scale: int | None = None,
- min_width: int | None = None,
- visible: bool | None = None,
- ):
- updated_config = {
- "show_legend": show_legend,
- "height": height,
- "width": width,
- "color_map": color_map,
- "label": label,
- "show_label": show_label,
- "container": container,
- "scale": scale,
- "min_width": min_width,
- "visible": visible,
- "value": value,
- "__type__": "update",
- }
- return updated_config
-
- def postprocess(
- self,
- y: tuple[
- np.ndarray | _Image.Image | str,
- list[tuple[np.ndarray | tuple[int, int, int, int], str]],
- ],
- ) -> tuple[dict, list[tuple[dict, str]]] | None:
- """
- Parameters:
- y: Tuple of base image and list of subsections, with each subsection a two-part tuple where the first element is a 4 element bounding box or a 0-1 confidence mask, and the second element is the label.
- Returns:
- Tuple of base image file and list of subsections, with each subsection a two-part tuple where the first element image path of the mask, and the second element is the label.
- """
- if y is None:
- return None
- base_img = y[0]
- if isinstance(base_img, str):
- base_img_path = base_img
- base_img = np.array(_Image.open(base_img))
- elif isinstance(base_img, np.ndarray):
- base_file = self.img_array_to_temp_file(base_img, dir=self.DEFAULT_TEMP_DIR)
- base_img_path = str(utils.abspath(base_file))
- elif isinstance(base_img, _Image.Image):
- base_file = self.pil_to_temp_file(base_img, dir=self.DEFAULT_TEMP_DIR)
- base_img_path = str(utils.abspath(base_file))
- base_img = np.array(base_img)
- else:
- raise ValueError(
- "AnnotatedImage only accepts filepaths, PIL images or numpy arrays for the base image."
- )
- self.temp_files.add(base_img_path)
-
- sections = []
- color_map = self.color_map or {}
-
- def hex_to_rgb(value):
- value = value.lstrip("#")
- lv = len(value)
- return [int(value[i : i + lv // 3], 16) for i in range(0, lv, lv // 3)]
-
- for mask, label in y[1]:
- mask_array = np.zeros((base_img.shape[0], base_img.shape[1]))
- if isinstance(mask, np.ndarray):
- mask_array = mask
- else:
- x1, y1, x2, y2 = mask
- border_width = 3
- mask_array[y1:y2, x1:x2] = 0.5
- mask_array[y1:y2, x1 : x1 + border_width] = 1
- mask_array[y1:y2, x2 - border_width : x2] = 1
- mask_array[y1 : y1 + border_width, x1:x2] = 1
- mask_array[y2 - border_width : y2, x1:x2] = 1
-
- if label in color_map:
- rgb_color = hex_to_rgb(color_map[label])
- else:
- rgb_color = [255, 0, 0]
- colored_mask = np.zeros((base_img.shape[0], base_img.shape[1], 4))
- solid_mask = np.copy(mask_array)
- solid_mask[solid_mask > 0] = 1
-
- colored_mask[:, :, 0] = rgb_color[0] * solid_mask
- colored_mask[:, :, 1] = rgb_color[1] * solid_mask
- colored_mask[:, :, 2] = rgb_color[2] * solid_mask
- colored_mask[:, :, 3] = mask_array * 255
-
- colored_mask_img = _Image.fromarray((colored_mask).astype(np.uint8))
-
- mask_file = self.pil_to_temp_file(
- colored_mask_img, dir=self.DEFAULT_TEMP_DIR
- )
- mask_file_path = str(utils.abspath(mask_file))
- self.temp_files.add(mask_file_path)
-
- sections.append(
- ({"name": mask_file_path, "data": None, "is_file": True}, label)
- )
-
- return {"name": base_img_path, "data": None, "is_file": True}, sections
-
- def style(
- self,
- *,
- height: int | None = None,
- width: int | None = None,
- color_map: dict[str, str] | None = None,
- **kwargs,
- ):
- """
- This method is deprecated. Please set these arguments in the constructor instead.
- """
- warn_style_method_deprecation()
- if height is not None:
- self.height = height
- if width is not None:
- self.width = width
- if color_map is not None:
- self.color_map = color_map
- return self
diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b7554727.css b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b7554727.css
deleted file mode 100644
index 6042064e14607ebbe037f2a497bab442b590be17..0000000000000000000000000000000000000000
--- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-b7554727.css
+++ /dev/null
@@ -1 +0,0 @@
-.wrap.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{display:flex;flex-wrap:wrap;gap:var(--checkbox-label-gap)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{display:flex;align-items:center;transition:var(--button-transition);cursor:pointer;box-shadow:var(--checkbox-label-shadow);border:var(--checkbox-label-border-width) solid var(--checkbox-label-border-color);border-radius:var(--button-small-radius);background:var(--checkbox-label-background-fill);padding:var(--checkbox-label-padding);color:var(--checkbox-label-text-color);font-weight:var(--checkbox-label-text-weight);font-size:var(--checkbox-label-text-size);line-height:var(--line-md)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:hover{background:var(--checkbox-label-background-fill-hover)}label.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:focus{background:var(--checkbox-label-background-fill-focus)}label.selected.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{background:var(--checkbox-label-background-fill-selected);color:var(--checkbox-label-text-color-selected)}label.svelte-1p9xokt>.svelte-1p9xokt+.svelte-1p9xokt{margin-left:var(--size-2)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{--ring-color:transparent;position:relative;box-shadow:var(--checkbox-shadow);border:var(--checkbox-border-width) solid var(--checkbox-border-color);border-radius:var(--radius-full);background-color:var(--checkbox-background-color);line-height:var(--line-sm)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked,input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked:hover,input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:checked:focus{border-color:var(--checkbox-border-color-selected);background-image:var(--radio-circle);background-color:var(--checkbox-background-color-selected)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:hover{border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt:focus{border-color:var(--checkbox-border-color-focus);background-color:var(--checkbox-background-color-focus)}input[disabled].svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt,.disabled.svelte-1p9xokt.svelte-1p9xokt.svelte-1p9xokt{cursor:not-allowed}
diff --git a/spaces/deafheavennnn/metalproxy/README.md b/spaces/deafheavennnn/metalproxy/README.md
deleted file mode 100644
index e150cd67ca4cb6e9d77011fce185c3d7469fc66e..0000000000000000000000000000000000000000
--- a/spaces/deafheavennnn/metalproxy/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Metalproxy
-emoji: 📚
-colorFrom: indigo
-colorTo: pink
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/declare-lab/tango/diffusers/examples/controlnet/train_controlnet.py b/spaces/declare-lab/tango/diffusers/examples/controlnet/train_controlnet.py
deleted file mode 100644
index 20c4fbe189a15493f26727580b250e431bbb3f99..0000000000000000000000000000000000000000
--- a/spaces/declare-lab/tango/diffusers/examples/controlnet/train_controlnet.py
+++ /dev/null
@@ -1,1046 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import argparse
-import logging
-import math
-import os
-import random
-from pathlib import Path
-
-import accelerate
-import numpy as np
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-import transformers
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import ProjectConfiguration, set_seed
-from datasets import load_dataset
-from huggingface_hub import create_repo, upload_folder
-from packaging import version
-from PIL import Image
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import AutoTokenizer, PretrainedConfig
-
-import diffusers
-from diffusers import (
- AutoencoderKL,
- ControlNetModel,
- DDPMScheduler,
- StableDiffusionControlNetPipeline,
- UNet2DConditionModel,
- UniPCMultistepScheduler,
-)
-from diffusers.optimization import get_scheduler
-from diffusers.utils import check_min_version, is_wandb_available
-from diffusers.utils.import_utils import is_xformers_available
-
-
-if is_wandb_available():
- import wandb
-
-# Will error if the minimal version of diffusers is not installed. Remove at your own risk.
-check_min_version("0.15.0.dev0")
-
-logger = get_logger(__name__)
-
-
-def log_validation(vae, text_encoder, tokenizer, unet, controlnet, args, accelerator, weight_dtype, step):
- logger.info("Running validation... ")
-
- controlnet = accelerator.unwrap_model(controlnet)
-
- pipeline = StableDiffusionControlNetPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- vae=vae,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- unet=unet,
- controlnet=controlnet,
- safety_checker=None,
- revision=args.revision,
- torch_dtype=weight_dtype,
- )
- pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
- pipeline = pipeline.to(accelerator.device)
- pipeline.set_progress_bar_config(disable=True)
-
- if args.enable_xformers_memory_efficient_attention:
- pipeline.enable_xformers_memory_efficient_attention()
-
- if args.seed is None:
- generator = None
- else:
- generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
-
- if len(args.validation_image) == len(args.validation_prompt):
- validation_images = args.validation_image
- validation_prompts = args.validation_prompt
- elif len(args.validation_image) == 1:
- validation_images = args.validation_image * len(args.validation_prompt)
- validation_prompts = args.validation_prompt
- elif len(args.validation_prompt) == 1:
- validation_images = args.validation_image
- validation_prompts = args.validation_prompt * len(args.validation_image)
- else:
- raise ValueError(
- "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`"
- )
-
- image_logs = []
-
- for validation_prompt, validation_image in zip(validation_prompts, validation_images):
- validation_image = Image.open(validation_image).convert("RGB")
-
- images = []
-
- for _ in range(args.num_validation_images):
- with torch.autocast("cuda"):
- image = pipeline(
- validation_prompt, validation_image, num_inference_steps=20, generator=generator
- ).images[0]
-
- images.append(image)
-
- image_logs.append(
- {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt}
- )
-
- for tracker in accelerator.trackers:
- if tracker.name == "tensorboard":
- for log in image_logs:
- images = log["images"]
- validation_prompt = log["validation_prompt"]
- validation_image = log["validation_image"]
-
- formatted_images = []
-
- formatted_images.append(np.asarray(validation_image))
-
- for image in images:
- formatted_images.append(np.asarray(image))
-
- formatted_images = np.stack(formatted_images)
-
- tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC")
- elif tracker.name == "wandb":
- formatted_images = []
-
- for log in image_logs:
- images = log["images"]
- validation_prompt = log["validation_prompt"]
- validation_image = log["validation_image"]
-
- formatted_images.append(wandb.Image(validation_image, caption="Controlnet conditioning"))
-
- for image in images:
- image = wandb.Image(image, caption=validation_prompt)
- formatted_images.append(image)
-
- tracker.log({"validation": formatted_images})
- else:
- logger.warn(f"image logging not implemented for {tracker.name}")
-
-
-def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
- text_encoder_config = PretrainedConfig.from_pretrained(
- pretrained_model_name_or_path,
- subfolder="text_encoder",
- revision=revision,
- )
- model_class = text_encoder_config.architectures[0]
-
- if model_class == "CLIPTextModel":
- from transformers import CLIPTextModel
-
- return CLIPTextModel
- elif model_class == "RobertaSeriesModelWithTransformation":
- from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
-
- return RobertaSeriesModelWithTransformation
- else:
- raise ValueError(f"{model_class} is not supported.")
-
-
-def parse_args(input_args=None):
- parser = argparse.ArgumentParser(description="Simple example of a ControlNet training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--controlnet_model_name_or_path",
- type=str,
- default=None,
- help="Path to pretrained controlnet model or model identifier from huggingface.co/models."
- " If not specified controlnet weights are initialized from unet.",
- )
- parser.add_argument(
- "--revision",
- type=str,
- default=None,
- required=False,
- help=(
- "Revision of pretrained model identifier from huggingface.co/models. Trainable model components should be"
- " float32 precision."
- ),
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="controlnet-model",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument(
- "--cache_dir",
- type=str,
- default=None,
- help="The directory where the downloaded models and datasets will be stored.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument("--num_train_epochs", type=int, default=1)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--checkpointing_steps",
- type=int,
- default=500,
- help=(
- "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
- "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
- "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
- "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step"
- "instructions."
- ),
- )
- parser.add_argument(
- "--checkpoints_total_limit",
- type=int,
- default=None,
- help=(
- "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
- " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
- " for more details"
- ),
- )
- parser.add_argument(
- "--resume_from_checkpoint",
- type=str,
- default=None,
- help=(
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
- ),
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=5e-6,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--lr_num_cycles",
- type=int,
- default=1,
- help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
- )
- parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument(
- "--dataloader_num_workers",
- type=int,
- default=0,
- help=(
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
- ),
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--allow_tf32",
- action="store_true",
- help=(
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
- ),
- )
- parser.add_argument(
- "--report_to",
- type=str,
- default="tensorboard",
- help=(
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default=None,
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
- " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
- ),
- )
- parser.add_argument(
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
- )
- parser.add_argument(
- "--set_grads_to_none",
- action="store_true",
- help=(
- "Save more memory by using setting grads to None instead of zero. Be aware, that this changes certain"
- " behaviors, so disable this argument if it causes any problems. More info:"
- " https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html"
- ),
- )
- parser.add_argument(
- "--dataset_name",
- type=str,
- default=None,
- help=(
- "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
- " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
- " or to a folder containing files that 🤗 Datasets can understand."
- ),
- )
- parser.add_argument(
- "--dataset_config_name",
- type=str,
- default=None,
- help="The config of the Dataset, leave as None if there's only one config.",
- )
- parser.add_argument(
- "--train_data_dir",
- type=str,
- default=None,
- help=(
- "A folder containing the training data. Folder contents must follow the structure described in"
- " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
- " must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
- ),
- )
- parser.add_argument(
- "--image_column", type=str, default="image", help="The column of the dataset containing the target image."
- )
- parser.add_argument(
- "--conditioning_image_column",
- type=str,
- default="conditioning_image",
- help="The column of the dataset containing the controlnet conditioning image.",
- )
- parser.add_argument(
- "--caption_column",
- type=str,
- default="text",
- help="The column of the dataset containing a caption or a list of captions.",
- )
- parser.add_argument(
- "--max_train_samples",
- type=int,
- default=None,
- help=(
- "For debugging purposes or quicker training, truncate the number of training examples to this "
- "value if set."
- ),
- )
- parser.add_argument(
- "--proportion_empty_prompts",
- type=float,
- default=0,
- help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
- )
- parser.add_argument(
- "--validation_prompt",
- type=str,
- default=None,
- nargs="+",
- help=(
- "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`."
- " Provide either a matching number of `--validation_image`s, a single `--validation_image`"
- " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s."
- ),
- )
- parser.add_argument(
- "--validation_image",
- type=str,
- default=None,
- nargs="+",
- help=(
- "A set of paths to the controlnet conditioning image be evaluated every `--validation_steps`"
- " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a"
- " a single `--validation_prompt` to be used with all `--validation_image`s, or a single"
- " `--validation_image` that will be used with all `--validation_prompt`s."
- ),
- )
- parser.add_argument(
- "--num_validation_images",
- type=int,
- default=4,
- help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair",
- )
- parser.add_argument(
- "--validation_steps",
- type=int,
- default=100,
- help=(
- "Run validation every X steps. Validation consists of running the prompt"
- " `args.validation_prompt` multiple times: `args.num_validation_images`"
- " and logging the images."
- ),
- )
- parser.add_argument(
- "--tracker_project_name",
- type=str,
- default="train_controlnet",
- required=True,
- help=(
- "The `project_name` argument passed to Accelerator.init_trackers for"
- " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
- ),
- )
-
- if input_args is not None:
- args = parser.parse_args(input_args)
- else:
- args = parser.parse_args()
-
- if args.dataset_name is None and args.train_data_dir is None:
- raise ValueError("Specify either `--dataset_name` or `--train_data_dir`")
-
- if args.dataset_name is not None and args.train_data_dir is not None:
- raise ValueError("Specify only one of `--dataset_name` or `--train_data_dir`")
-
- if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
- raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].")
-
- if args.validation_prompt is not None and args.validation_image is None:
- raise ValueError("`--validation_image` must be set if `--validation_prompt` is set")
-
- if args.validation_prompt is None and args.validation_image is not None:
- raise ValueError("`--validation_prompt` must be set if `--validation_image` is set")
-
- if (
- args.validation_image is not None
- and args.validation_prompt is not None
- and len(args.validation_image) != 1
- and len(args.validation_prompt) != 1
- and len(args.validation_image) != len(args.validation_prompt)
- ):
- raise ValueError(
- "Must provide either 1 `--validation_image`, 1 `--validation_prompt`,"
- " or the same number of `--validation_prompt`s and `--validation_image`s"
- )
-
- return args
-
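-# Example invocation (illustrative values only; the model, dataset, and paths below are
-# placeholders, not requirements of this script):
-#
-#   accelerate launch train_controlnet.py \
-#     --pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5" \
-#     --dataset_name=fusing/fill50k \
-#     --output_dir=./controlnet-out \
-#     --resolution=512 \
-#     --learning_rate=1e-5 \
-#     --train_batch_size=4 \
-#     --tracker_project_name=train_controlnet \
-#     --validation_image "./cond.png" \
-#     --validation_prompt "red circle with blue background"
-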
-
-def make_train_dataset(args, tokenizer, accelerator):
- # Get the datasets: you can either provide your own training and evaluation files (see below)
- # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
-
- # In distributed training, the load_dataset function guarantees that only one local process can concurrently
- # download the dataset.
- if args.dataset_name is not None:
- # Downloading and loading a dataset from the hub.
- dataset = load_dataset(
- args.dataset_name,
- args.dataset_config_name,
- cache_dir=args.cache_dir,
- )
- else:
- if args.train_data_dir is not None:
- dataset = load_dataset(
- args.train_data_dir,
- cache_dir=args.cache_dir,
- )
- # See more about loading custom images at
- # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
-
- # Preprocessing the datasets.
- # We need to tokenize inputs and targets.
- column_names = dataset["train"].column_names
-
- # 6. Get the column names for input/target.
- if args.image_column is None:
- image_column = column_names[0]
- logger.info(f"image column defaulting to {image_column}")
- else:
- image_column = args.image_column
- if image_column not in column_names:
- raise ValueError(
- f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
- )
-
- if args.caption_column is None:
- caption_column = column_names[1]
- logger.info(f"caption column defaulting to {caption_column}")
- else:
- caption_column = args.caption_column
- if caption_column not in column_names:
- raise ValueError(
- f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
- )
-
- if args.conditioning_image_column is None:
- conditioning_image_column = column_names[2]
- logger.info(f"conditioning image column defaulting to {caption_column}")
- else:
- conditioning_image_column = args.conditioning_image_column
- if conditioning_image_column not in column_names:
- raise ValueError(
- f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
- )
-
- def tokenize_captions(examples, is_train=True):
- captions = []
- for caption in examples[caption_column]:
- if random.random() < args.proportion_empty_prompts:
- captions.append("")
- elif isinstance(caption, str):
- captions.append(caption)
- elif isinstance(caption, (list, np.ndarray)):
- # take a random caption if there are multiple
- captions.append(random.choice(caption) if is_train else caption[0])
- else:
- raise ValueError(
- f"Caption column `{caption_column}` should contain either strings or lists of strings."
- )
- inputs = tokenizer(
- captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt"
- )
- return inputs.input_ids
-
- image_transforms = transforms.Compose(
- [
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- conditioning_image_transforms = transforms.Compose(
- [
- transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.ToTensor(),
- ]
- )
-
- def preprocess_train(examples):
- images = [image.convert("RGB") for image in examples[image_column]]
- images = [image_transforms(image) for image in images]
-
- conditioning_images = [image.convert("RGB") for image in examples[conditioning_image_column]]
- conditioning_images = [conditioning_image_transforms(image) for image in conditioning_images]
-
- examples["pixel_values"] = images
- examples["conditioning_pixel_values"] = conditioning_images
- examples["input_ids"] = tokenize_captions(examples)
-
- return examples
-
- with accelerator.main_process_first():
- if args.max_train_samples is not None:
- dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
- # Set the training transforms
- train_dataset = dataset["train"].with_transform(preprocess_train)
-
- return train_dataset
-
-
-def collate_fn(examples):
- pixel_values = torch.stack([example["pixel_values"] for example in examples])
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
- conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples])
- conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float()
-
- input_ids = torch.stack([example["input_ids"] for example in examples])
-
- return {
- "pixel_values": pixel_values,
- "conditioning_pixel_values": conditioning_pixel_values,
- "input_ids": input_ids,
- }
-
-
-def main(args):
- logging_dir = Path(args.output_dir, args.logging_dir)
-
- accelerator_project_config = ProjectConfiguration(total_limit=args.checkpoints_total_limit)
-
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with=args.report_to,
- logging_dir=logging_dir,
- project_config=accelerator_project_config,
- )
-
- # Make one log on every process with the configuration for debugging.
- logging.basicConfig(
- format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
- datefmt="%m/%d/%Y %H:%M:%S",
- level=logging.INFO,
- )
- logger.info(accelerator.state, main_process_only=False)
- if accelerator.is_local_main_process:
- transformers.utils.logging.set_verbosity_warning()
- diffusers.utils.logging.set_verbosity_info()
- else:
- transformers.utils.logging.set_verbosity_error()
- diffusers.utils.logging.set_verbosity_error()
-
- # If passed along, set the training seed now.
- if args.seed is not None:
- set_seed(args.seed)
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- if args.push_to_hub:
- repo_id = create_repo(
- repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
- ).repo_id
-
- # Load the tokenizer
- if args.tokenizer_name:
- tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
- elif args.pretrained_model_name_or_path:
- tokenizer = AutoTokenizer.from_pretrained(
- args.pretrained_model_name_or_path,
- subfolder="tokenizer",
- revision=args.revision,
- use_fast=False,
- )
-
- # import correct text encoder class
- text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
-
- # Load scheduler and models
- noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
- text_encoder = text_encoder_cls.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
- )
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
- unet = UNet2DConditionModel.from_pretrained(
- args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
- )
-
- if args.controlnet_model_name_or_path:
- logger.info("Loading existing controlnet weights")
- controlnet = ControlNetModel.from_pretrained(args.controlnet_model_name_or_path)
- else:
- logger.info("Initializing controlnet weights from unet")
- controlnet = ControlNetModel.from_unet(unet)
-
- # `accelerate` 0.16.0 will have better support for customized saving
- if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
- # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
- def save_model_hook(models, weights, output_dir):
- i = len(weights) - 1
-
- while len(weights) > 0:
- weights.pop()
- model = models[i]
-
- sub_dir = "controlnet"
- model.save_pretrained(os.path.join(output_dir, sub_dir))
-
- i -= 1
-
- def load_model_hook(models, input_dir):
- while len(models) > 0:
- # pop models so that they are not loaded again
- model = models.pop()
-
- # load diffusers style into model
- load_model = ControlNetModel.from_pretrained(input_dir, subfolder="controlnet")
- model.register_to_config(**load_model.config)
-
- model.load_state_dict(load_model.state_dict())
- del load_model
-
- accelerator.register_save_state_pre_hook(save_model_hook)
- accelerator.register_load_state_pre_hook(load_model_hook)
-
- vae.requires_grad_(False)
- unet.requires_grad_(False)
- text_encoder.requires_grad_(False)
- controlnet.train()
-
- if args.enable_xformers_memory_efficient_attention:
- if is_xformers_available():
- import xformers
-
- xformers_version = version.parse(xformers.__version__)
- if xformers_version == version.parse("0.0.16"):
- logger.warn(
- "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
- )
- unet.enable_xformers_memory_efficient_attention()
- controlnet.enable_xformers_memory_efficient_attention()
- else:
- raise ValueError("xformers is not available. Make sure it is installed correctly")
-
- if args.gradient_checkpointing:
- controlnet.enable_gradient_checkpointing()
-
- # Check that all trainable models are in full precision
- low_precision_error_string = (
- " Please make sure to always have all model weights in full float32 precision when starting training - even if"
- " doing mixed precision training, copy of the weights should still be float32."
- )
-
- if accelerator.unwrap_model(controlnet).dtype != torch.float32:
- raise ValueError(
- f"Controlnet loaded as datatype {accelerator.unwrap_model(controlnet).dtype}. {low_precision_error_string}"
- )
-
- # Enable TF32 for faster training on Ampere GPUs,
- # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
- if args.allow_tf32:
- torch.backends.cuda.matmul.allow_tf32 = True
-
- if args.scale_lr:
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
- # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
- )
-
- optimizer_class = bnb.optim.AdamW8bit
- else:
- optimizer_class = torch.optim.AdamW
-
- # Optimizer creation
- params_to_optimize = controlnet.parameters()
- optimizer = optimizer_class(
- params_to_optimize,
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- train_dataset = make_train_dataset(args, tokenizer, accelerator)
-
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset,
- shuffle=True,
- collate_fn=collate_fn,
- batch_size=args.train_batch_size,
- num_workers=args.dataloader_num_workers,
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
- num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
- num_cycles=args.lr_num_cycles,
- power=args.lr_power,
- )
-
- # Prepare everything with our `accelerator`.
- controlnet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- controlnet, optimizer, train_dataloader, lr_scheduler
- )
-
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
- # as these models are only used for inference, keeping weights in full precision is not required.
- weight_dtype = torch.float32
- if accelerator.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif accelerator.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
- # Move vae, unet and text_encoder to device and cast to weight_dtype
- vae.to(accelerator.device, dtype=weight_dtype)
- unet.to(accelerator.device, dtype=weight_dtype)
- text_encoder.to(accelerator.device, dtype=weight_dtype)
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
- # The trackers are initialized automatically on the main process.
- if accelerator.is_main_process:
- tracker_config = dict(vars(args))
-
- # tensorboard cannot handle list types for config
- tracker_config.pop("validation_prompt")
- tracker_config.pop("validation_image")
-
- accelerator.init_trackers(args.tracker_project_name, config=tracker_config)
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- global_step = 0
- first_epoch = 0
-
- # Potentially load in the weights and states from a previous save
- if args.resume_from_checkpoint:
- if args.resume_from_checkpoint != "latest":
- path = os.path.basename(args.resume_from_checkpoint)
- else:
- # Get the most recent checkpoint
- dirs = os.listdir(args.output_dir)
- dirs = [d for d in dirs if d.startswith("checkpoint")]
- dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
- path = dirs[-1] if len(dirs) > 0 else None
-
- if path is None:
- accelerator.print(
- f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
- )
- args.resume_from_checkpoint = None
- initial_global_step = 0
- else:
- accelerator.print(f"Resuming from checkpoint {path}")
- accelerator.load_state(os.path.join(args.output_dir, path))
- global_step = int(path.split("-")[1])
-
- initial_global_step = global_step * args.gradient_accumulation_steps
- first_epoch = global_step // num_update_steps_per_epoch
- else:
- initial_global_step = 0
-
- progress_bar = tqdm(
- range(0, args.max_train_steps),
- initial=initial_global_step,
- desc="Steps",
- # Only show the progress bar once on each machine.
- disable=not accelerator.is_local_main_process,
- )
-
- for epoch in range(first_epoch, args.num_train_epochs):
- for step, batch in enumerate(train_dataloader):
- with accelerator.accumulate(controlnet):
- # Convert images to latent space
- latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
- latents = latents * vae.config.scaling_factor
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
- timesteps = timesteps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
- # Get the text embedding for conditioning
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- controlnet_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype)
-
- down_block_res_samples, mid_block_res_sample = controlnet(
- noisy_latents,
- timesteps,
- encoder_hidden_states=encoder_hidden_states,
- controlnet_cond=controlnet_image,
- return_dict=False,
- )
-
- # Predict the noise residual
- model_pred = unet(
- noisy_latents,
- timesteps,
- encoder_hidden_states=encoder_hidden_states,
- down_block_additional_residuals=down_block_res_samples,
- mid_block_additional_residual=mid_block_res_sample,
- ).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- params_to_clip = controlnet.parameters()
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad(set_to_none=args.set_grads_to_none)
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
-
- if accelerator.is_main_process:
- if global_step % args.checkpointing_steps == 0:
- save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
- accelerator.save_state(save_path)
- logger.info(f"Saved state to {save_path}")
-
- if args.validation_prompt is not None and global_step % args.validation_steps == 0:
- log_validation(
- vae,
- text_encoder,
- tokenizer,
- unet,
- controlnet,
- args,
- accelerator,
- weight_dtype,
- global_step,
- )
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- # Create the pipeline using the trained modules and save it.
- accelerator.wait_for_everyone()
- if accelerator.is_main_process:
- controlnet = accelerator.unwrap_model(controlnet)
- controlnet.save_pretrained(args.output_dir)
-
- if args.push_to_hub:
- upload_folder(
- repo_id=repo_id,
- folder_path=args.output_dir,
- commit_message="End of training",
- ignore_patterns=["step_*", "epoch_*"],
- )
-
- accelerator.end_training()
-
-
-if __name__ == "__main__":
- args = parse_args()
- main(args)
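The deleted script above saves the trained ControlNet with `controlnet.save_pretrained(args.output_dir)`. As a rough illustration (not part of the original Space), the sketch below shows one way such a checkpoint could be loaded for inference with diffusers; the checkpoint path, base model id, prompt and conditioning image are assumptions.

```python
# Hypothetical inference sketch for a ControlNet saved by a script like the one above.
# Assumptions: "./controlnet-out" is the training output_dir and SD 1.5 was the base model.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained("./controlnet-out", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

cond = load_image("conditioning.png")  # hypothetical conditioning image
image = pipe("a photo of a living room", image=cond, num_inference_steps=30).images[0]
image.save("sample.png")
```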
diff --git a/spaces/decodemai/business_tech_ideas/app.py b/spaces/decodemai/business_tech_ideas/app.py
deleted file mode 100644
index a42823a1c6c3c71c8f06f7264187e4b9ef9375d1..0000000000000000000000000000000000000000
--- a/spaces/decodemai/business_tech_ideas/app.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import json
-import requests
-import gradio as gr
-import random
-import time
-import os
-import datetime
-from datetime import datetime
-
-
-API_TOKEN = os.getenv("API_TOKEN")
-DECODEM_TOKEN=os.getenv("DECODEM_TOKEN")
-
-
-from huggingface_hub import InferenceApi
-inference = InferenceApi("bigscience/bloom",token=API_TOKEN)
-
-headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
-url_decodemprompts='https://us-central1-createinsightsproject.cloudfunctions.net/getdecodemprompts'
-
-data={"prompt_type":'business_tech_ideas',"decodem_token":DECODEM_TOKEN}
-try:
- r = requests.post(url_decodemprompts, data=json.dumps(data), headers=headers)
-except requests.exceptions.ReadTimeout as e:
- print(e)
-#print(r.content)
-
-prompt=str(r.content, 'UTF-8')
-
-def infer(prompt,
- max_length = 250,
- top_k = 0,
- num_beams = 0,
- no_repeat_ngram_size = 2,
- top_p = 0.9,
- seed=42,
- temperature=0.7,
- greedy_decoding = False,
- return_full_text = False):
-
- print(seed)
- top_k = None if top_k == 0 else top_k
- do_sample = False if num_beams > 0 else not greedy_decoding
- num_beams = None if (greedy_decoding or num_beams == 0) else num_beams
- no_repeat_ngram_size = None if num_beams is None else no_repeat_ngram_size
- top_p = None if num_beams else top_p
- early_stopping = None if num_beams is None else num_beams > 0
-
- params = {
- "max_new_tokens": max_length,
- "top_k": top_k,
- "top_p": top_p,
- "temperature": temperature,
- "do_sample": do_sample,
- "seed": seed,
- "early_stopping":early_stopping,
- "no_repeat_ngram_size":no_repeat_ngram_size,
- "num_beams":num_beams,
- "return_full_text":return_full_text
- }
-
- s = time.time()
- response = inference(prompt, params=params)
- #print(response)
- proc_time = time.time()-s
- #print(f"Processing time was {proc_time} seconds")
- return response
-
-def getideas(text_inp):
- print(text_inp)
- print(datetime.today().strftime("%d-%m-%Y"))
-
- text = prompt+"\nInput:"+text_inp + "\nOutput:"
- resp = infer(text,seed=random.randint(0,100))
-
- generated_text=resp[0]['generated_text']
- result = generated_text.replace(text,'').strip()
- result = result.replace("Output:","")
- parts = result.split("###")
- topic = parts[0].strip()
- topic="\n".join(topic.split('\n')[:3])
- print(topic)
- return(topic)
-
-with gr.Blocks() as demo:
- gr.Markdown("
Tech Ideas for Your Business
")
- gr.Markdown(
- """ChatGPT based Insights from Decodem.ai for businesses.\nWhile ChatGPT has multiple use cases we have evolved specific use cases/ templates for businesses \n\n This template provides ideas on how a business would look like in the future. Enter a business area and get the results. We use a equally powerful AI model bigscience/bloom."""
- )
- textbox = gr.Textbox(placeholder="Enter business type here...", lines=1,label='Your business area')
- btn = gr.Button("Generate")
- output1 = gr.Textbox(lines=2,label='The future')
-
- btn.click(getideas,inputs=[textbox], outputs=[output1])
- examples = gr.Examples(examples=['icecream parlor','space travel','book shop','ecommerce','grocery delivery'],
- inputs=[textbox])
-
-
-demo.launch()
\ No newline at end of file
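For reference (not from the deleted file), here is a minimal sketch of calling the same `bigscience/bloom` endpoint through `huggingface_hub.InferenceApi`, mirroring the `infer()` helper above; the token environment variable name and the example prompt are assumptions.

```python
# Minimal sketch mirroring infer() above. Assumptions: HF_API_TOKEN holds a valid token
# and the bloom hosted-inference endpoint is reachable.
import os
from huggingface_hub import InferenceApi

inference = InferenceApi("bigscience/bloom", token=os.getenv("HF_API_TOKEN"))
params = {
    "max_new_tokens": 64,
    "top_p": 0.9,
    "temperature": 0.7,
    "do_sample": True,
    "seed": 42,
    "return_full_text": False,
}
resp = inference("Input: icecream parlor\nOutput:", params=params)
print(resp[0]["generated_text"])  # same response shape the app's getideas() parses
```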
diff --git a/spaces/derek-thomas/RAGDemo/app.py b/spaces/derek-thomas/RAGDemo/app.py
deleted file mode 100644
index 132e48e0905717fd051c51afd568332c4c36a17d..0000000000000000000000000000000000000000
--- a/spaces/derek-thomas/RAGDemo/app.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import subprocess
-
-subprocess.run(["pip", "install", "--upgrade", "transformers[torch,sentencepiece]==4.34.1"])
-
-from functools import partial
-import logging
-from pathlib import Path
-from time import perf_counter
-
-import gradio as gr
-from jinja2 import Environment, FileSystemLoader
-
-from backend.query_llm import generate
-from backend.semantic_search import qd_retriever
-
-proj_dir = Path(__file__).parent
-# Setting up the logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# Set up the template environment with the templates directory
-env = Environment(loader=FileSystemLoader(proj_dir / 'templates'))
-
-# Load the templates directly from the environment
-template = env.get_template('template.j2')
-template_html = env.get_template('template_html.j2')
-
-# Examples
-examples = ['What is the capital of China?',
- 'Why is the sky blue?',
- "Who won the men's World Cup in 2014?", ]
-
-
-def add_text(history, text):
- history = [] if history is None else history
- history = history + [(text, None)]
- return history, gr.Textbox(value="", interactive=False)
-
-
-def bot(history, hyde=False):
- top_k = 4
- query = history[-1][0]
-
- logger.warning('Retrieving documents...')
- # Retrieve documents relevant to query
- document_start = perf_counter()
- if hyde:
- hyde_document = ""
- generator = generate(f"Write a wikipedia article intro paragraph to answer this query: {query}", history)
- for output_chunk in generator:
- hyde_document = output_chunk
-
- logger.warning(hyde_document)
- documents = qd_retriever.retrieve(hyde_document, top_k=top_k)
- else:
- documents = qd_retriever.retrieve(query, top_k=top_k)
- document_time = perf_counter() - document_start
- logger.warning(f'Finished Retrieving documents in {round(document_time, 2)} seconds...')
-
- # Create Prompt
- prompt = template.render(documents=documents, query=query)
- prompt_html = template_html.render(documents=documents, query=query)
-
- history[-1][1] = ""
- for character in generate(prompt, history[:-1]):
- history[-1][1] = character
- yield history, prompt_html
-
-
-with gr.Blocks() as demo:
- with gr.Tab("RAGDemo"):
- chatbot = gr.Chatbot(
- [],
- elem_id="chatbot",
- avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
- 'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
- bubble_full_width=False,
- show_copy_button=True,
- show_share_button=True,
- )
-
- with gr.Row():
- txt = gr.Textbox(
- scale=3,
- show_label=False,
- placeholder="Enter text and press enter",
- container=False,
- )
- txt_btn = gr.Button(value="Submit text", scale=1)
-
- # Examples
- gr.Examples(examples, txt)
-
- prompt_html = gr.HTML()
- # Turn off interactivity while generating if you click
- txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
- bot, chatbot, [chatbot, prompt_html])
-
- # Turn it back on
- txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
-
- # Turn off interactivity while generating if you hit enter
- txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
- bot, chatbot, [chatbot, prompt_html])
-
- # Turn it back on
- txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
-
- with gr.Tab("RAGDemo + HyDE"):
- hyde_chatbot = gr.Chatbot(
- [],
- elem_id="chatbot",
- avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
- 'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
- bubble_full_width=False,
- show_copy_button=True,
- show_share_button=True,
- )
-
- with gr.Row():
- hyde_txt = gr.Textbox(
- scale=3,
- show_label=False,
- placeholder="Enter text and press enter",
- container=False,
- )
- hyde_txt_btn = gr.Button(value="Submit text", scale=1)
-
- # Examples
- gr.Examples(examples, hyde_txt)
-
- hyde_prompt_html = gr.HTML()
- # Turn off interactivity while generating if you click
- hyde_txt_msg = hyde_txt_btn.click(add_text, [hyde_chatbot, hyde_txt], [hyde_chatbot, hyde_txt], queue=False).then(
- partial(bot, hyde=True), [hyde_chatbot], [hyde_chatbot, hyde_prompt_html])
-
- # Turn it back on
- hyde_txt_msg.then(lambda: gr.Textbox(interactive=True), None, [hyde_txt], queue=False)
-
- # Turn off interactivity while generating if you hit enter
- hyde_txt_msg = hyde_txt.submit(add_text, [hyde_chatbot, hyde_txt], [hyde_chatbot, hyde_txt], queue=False).then(
- partial(bot, hyde=True), [hyde_chatbot], [hyde_chatbot, hyde_prompt_html])
-
- # Turn it back on
- hyde_txt_msg.then(lambda: gr.Textbox(interactive=True), None, [hyde_txt], queue=False)
-
-demo.queue()
-demo.launch(debug=True)
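The "RAGDemo + HyDE" tab above first generates a hypothetical answer and then retrieves with that text instead of the raw query. The snippet below is a self-contained sketch of that HyDE pattern only; the embedding model, the toy corpus and the stub `generate()` are assumptions standing in for the Space's `backend` package.

```python
# Standalone HyDE sketch (assumption: sentence-transformers is installed; generate() is a
# stub standing in for an LLM call such as backend.query_llm.generate in the app above).
from sentence_transformers import SentenceTransformer, util

def generate(prompt: str) -> str:
    return "Paris is the capital and most populous city of France."  # stubbed LLM output

corpus = [
    "Paris is the capital of France.",
    "The sky appears blue because of Rayleigh scattering.",
]
embedder = SentenceTransformer("all-MiniLM-L6-v2")
corpus_emb = embedder.encode(corpus, convert_to_tensor=True)

query = "What is the capital of France?"
hyde_doc = generate(f"Write a wikipedia article intro paragraph to answer this query: {query}")
hits = util.semantic_search(embedder.encode(hyde_doc, convert_to_tensor=True), corpus_emb, top_k=1)
print(corpus[hits[0][0]["corpus_id"]])  # retrieved with the hypothetical document, not the query
```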
diff --git a/spaces/devduttabain/facebook-musicgen-small/README.md b/spaces/devduttabain/facebook-musicgen-small/README.md
deleted file mode 100644
index c6caf022aeaa33a433565b807a46088c790981ce..0000000000000000000000000000000000000000
--- a/spaces/devduttabain/facebook-musicgen-small/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Facebook Musicgen Small
-emoji: 📉
-colorFrom: green
-colorTo: pink
-sdk: gradio
-sdk_version: 3.35.2
-app_file: app.py
-pinned: false
-license: unknown
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/diacanFperku/AutoGPT/Ezx-Number-1-Hits-Keygen-BEST-35.md b/spaces/diacanFperku/AutoGPT/Ezx-Number-1-Hits-Keygen-BEST-35.md
deleted file mode 100644
index 5236784bdea012cbae4493abbba054d8b16c5128..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Ezx-Number-1-Hits-Keygen-BEST-35.md
+++ /dev/null
@@ -1,33 +0,0 @@
-ezx number 1 hits keygen 35
-
-
-
-CLICK HERE ::: [https://maudaracte.blogspot.com/?file=2tvJdC](https://maudaracte.blogspot.com/?file=2tvJdC)
-
-
-
-
-
-
-
-
-
-Here is a possible title and article with html formatting for the keyword "ezx number 1 hits keygen 35":
-
-How to Use EZX Number 1 Hits Keygen 35 to Create Amazing Drum Tracks
-EZX Number 1 Hits is a sound expansion pack for Toontrack EZ Drummer and Superior Drummer that features a fusion of organic, electronic, high-tech and vintage drum sounds. It covers classic sounds from machines like Linn, TR-909, TR-808, DR55 and more, as well as custom designed acoustic sounds produced and mixed by Grammy Award-Winner Niklas Flyckt (Britney Spears -'Toxic').
-If you want to create contemporary pop, dance, house or hip hop drum tracks with EZX Number 1 Hits, you will need a keygen to unlock the full potential of this amazing product. A keygen is a software tool that generates a unique serial number or activation code for a software product. With a keygen, you can bypass the copy protection and use the software without any limitations.
-However, finding a reliable and working keygen for EZX Number 1 Hits is not easy. There are many fake or malicious keygens on the internet that can harm your computer or steal your personal information. That's why we have created this guide to help you find and use EZX Number 1 Hits Keygen 35, which is one of the best and safest keygens available for this product.
-Step 1: Download EZX Number 1 Hits Keygen 35
-The first step is to download EZX Number 1 Hits Keygen 35 from a trusted source. You can find it on bitbucket.org[^1^], iscribble.org[^2^] or soundcloud.com[^3^]. These are some of the websites that offer this keygen for free and without any viruses or malware. Just click on the link and follow the instructions to download the keygen file.
-Step 2: Run EZX Number 1 Hits Keygen 35
-The next step is to run EZX Number 1 Hits Keygen 35 on your computer. You will need to extract the keygen file from the zip archive first. Then, double-click on the keygen.exe file to launch it. You will see a simple interface with a button that says "Generate". Click on it and wait for a few seconds until the keygen generates a serial number for EZX Number 1 Hits.
-Step 3: Activate EZX Number 1 Hits with the Serial Number
-The final step is to activate EZX Number 1 Hits with the serial number that you got from the keygen. You will need to have EZ Drummer or Superior Drummer installed on your computer first. Then, open the software and go to the settings menu. There, you will find an option to enter your serial number for EZX Number 1 Hits. Paste the serial number that you copied from the keygen and click on "OK". The software will verify your serial number and activate EZX Number 1 Hits for you.
-Enjoy Creating Amazing Drum Tracks with EZX Number 1 Hits
-Congratulations! You have successfully activated EZX Number 1 Hits with EZX Number 1 Hits Keygen 35. Now you can enjoy creating amazing drum tracks with this sound expansion pack. You can choose from different kits, sounds and effects to suit your musical style and genre. You can also use the MIDI files that come with EZX Number 1 Hits to get some inspiration or modify them to your liking.
-EZX Number 1 Hits is a one-stop-shop for instant drum production. It has all the timeless sounds that have been used in countless hits over the years, as well as some fresh and modern sounds that will make your tracks stand out. With EZX Number 1 Hits Keygen 35, you can unlock all these sounds and use them without any restrictions.
-So what are you waiting for? Download EZX Number 1 Hits Keygen 35 today and start making some number one hits! dfd1c89656
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Fuoco Nella Stiva 2 Movie In Italian Free Download ((NEW)) Mp4.md b/spaces/diacanFperku/AutoGPT/Fuoco Nella Stiva 2 Movie In Italian Free Download ((NEW)) Mp4.md
deleted file mode 100644
index 919aeadca12b093183392d20509818b125fd2fc1..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Fuoco Nella Stiva 2 Movie In Italian Free Download ((NEW)) Mp4.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Fuoco nella stiva 2 movie in italian free download mp4
-
-Ya haider mp4 download ... Linking words worksheet 2 ... SN Betting Reviews Free betting Bet In Play Mobile Betting Head 2 Head P2P Betting Betting ... League Spain La Liga Italy Serie A Germany Bundesliga Europe Transfers USA MLS Like ... Tips may refer to: Tips Industries, an Indian film production company Tips ... 1fdad05405
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Mankatha Movie !!INSTALL!! Download Tamilrockers 26.md b/spaces/diacanFperku/AutoGPT/Mankatha Movie !!INSTALL!! Download Tamilrockers 26.md
deleted file mode 100644
index fdcb3c7da323a652ccfb52a5ca3574cc85912ddc..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Mankatha Movie !!INSTALL!! Download Tamilrockers 26.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-Mankata (translation of Gambling) is a 2011 Indian action thriller in Tamil written and directed by Venkat Prabhu. It stars Ajit Kumar, who is turning 50 years old. The film has an adventurous plot where the main characters fight against corrupt military officials who want to seize and steal money from the bank and fight them to restore justice.
-The film is set in the spirit of Prabhu's previous films (who won the 2010 Best Action Movie award) - it's fast-paced, tense and keeps you on your toes until the very end.
-The plot is simple, but it allows the actors to showcase their skills and showcase their charisma. 8a78ff9644
-
-
-
diff --git a/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/commons.py b/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/commons.py
deleted file mode 100644
index 9ad0444b61cbadaa388619986c2889c707d873ce..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/commons.py
+++ /dev/null
@@ -1,161 +0,0 @@
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-
-def init_weights(m, mean=0.0, std=0.01):
- classname = m.__class__.__name__
- if classname.find("Conv") != -1:
- m.weight.data.normal_(mean, std)
-
-
-def get_padding(kernel_size, dilation=1):
- return int((kernel_size*dilation - dilation)/2)
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def intersperse(lst, item):
- result = [item] * (len(lst) * 2 + 1)
- result[1::2] = lst
- return result
-
-
-def kl_divergence(m_p, logs_p, m_q, logs_q):
- """KL(P||Q)"""
- kl = (logs_q - logs_p) - 0.5
- kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
- return kl
-
-
-def rand_gumbel(shape):
- """Sample from the Gumbel distribution, protect from overflows."""
- uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
- return -torch.log(-torch.log(uniform_samples))
-
-
-def rand_gumbel_like(x):
- g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
- return g
-
-
-def slice_segments(x, ids_str, segment_size=4):
- ret = torch.zeros_like(x[:, :, :segment_size])
- for i in range(x.size(0)):
- idx_str = ids_str[i]
- idx_end = idx_str + segment_size
- ret[i] = x[i, :, idx_str:idx_end]
- return ret
-
-
-def rand_slice_segments(x, x_lengths=None, segment_size=4):
- b, d, t = x.size()
- if x_lengths is None:
- x_lengths = t
- ids_str_max = x_lengths - segment_size + 1
- ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
- ret = slice_segments(x, ids_str, segment_size)
- return ret, ids_str
-
-
-def get_timing_signal_1d(
- length, channels, min_timescale=1.0, max_timescale=1.0e4):
- position = torch.arange(length, dtype=torch.float)
- num_timescales = channels // 2
- log_timescale_increment = (
- math.log(float(max_timescale) / float(min_timescale)) /
- (num_timescales - 1))
- inv_timescales = min_timescale * torch.exp(
- torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
- scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
- signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
- signal = F.pad(signal, [0, 0, 0, channels % 2])
- signal = signal.view(1, channels, length)
- return signal
-
-
-def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return x + signal.to(dtype=x.dtype, device=x.device)
-
-
-def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
- b, channels, length = x.size()
- signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
- return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
-
-
-def subsequent_mask(length):
- mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
- return mask
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-
-def convert_pad_shape(pad_shape):
- l = pad_shape[::-1]
- pad_shape = [item for sublist in l for item in sublist]
- return pad_shape
-
-
-def shift_1d(x):
- x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
- return x
-
-
-def sequence_mask(length, max_length=None):
- if max_length is None:
- max_length = length.max()
- x = torch.arange(max_length, dtype=length.dtype, device=length.device)
- return x.unsqueeze(0) < length.unsqueeze(1)
-
-
-def generate_path(duration, mask):
- """
- duration: [b, 1, t_x]
- mask: [b, 1, t_y, t_x]
- """
- device = duration.device
-
- b, _, t_y, t_x = mask.shape
- cum_duration = torch.cumsum(duration, -1)
-
- cum_duration_flat = cum_duration.view(b * t_x)
- path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
- path = path.view(b, t_x, t_y)
- path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1]
- path = path.unsqueeze(1).transpose(2,3) * mask
- return path
-
-
-def clip_grad_value_(parameters, clip_value, norm_type=2):
- if isinstance(parameters, torch.Tensor):
- parameters = [parameters]
- parameters = list(filter(lambda p: p.grad is not None, parameters))
- norm_type = float(norm_type)
- if clip_value is not None:
- clip_value = float(clip_value)
-
- total_norm = 0
- for p in parameters:
- param_norm = p.grad.data.norm(norm_type)
- total_norm += param_norm.item() ** norm_type
- if clip_value is not None:
- p.grad.data.clamp_(min=-clip_value, max=clip_value)
- total_norm = total_norm ** (1. / norm_type)
- return total_norm
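As a quick orientation (not part of the deleted file), a shape-level usage sketch for two of the helpers above; it assumes the script is run from the repo root so that this file imports as `commons`, and the tensor shapes are illustrative.

```python
# Usage sketch for sequence_mask and rand_slice_segments (shapes are illustrative assumptions).
import torch
import commons  # assumes this file is importable as commons from the repo root

lengths = torch.tensor([3, 5])
mask = commons.sequence_mask(lengths, max_length=5)        # [2, 5] boolean padding mask
x = torch.randn(2, 192, 5)                                 # [batch, channels, time]
segments, ids_str = commons.rand_slice_segments(x, lengths, segment_size=4)
print(mask.shape, segments.shape)                          # torch.Size([2, 5]) torch.Size([2, 192, 4])
```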
diff --git a/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/text/cleaner.py b/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/text/cleaner.py
deleted file mode 100644
index 64bd5f7296f66c94f3a335666c53706bb5fe5b39..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Jiaohuaji-Bert-Vits2/text/cleaner.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from text import chinese, cleaned_text_to_sequence
-
-
-language_module_map = {
- 'ZH': chinese
-}
-
-
-def clean_text(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- return norm_text, phones, tones, word2ph
-
-def clean_text_bert(text, language):
- language_module = language_module_map[language]
- norm_text = language_module.text_normalize(text)
- phones, tones, word2ph = language_module.g2p(norm_text)
- bert = language_module.get_bert_feature(norm_text, word2ph)
- return phones, tones, bert
-
-def text_to_sequence(text, language):
- norm_text, phones, tones, word2ph = clean_text(text, language)
- return cleaned_text_to_sequence(phones, tones, language)
-
-if __name__ == '__main__':
- pass
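A small usage sketch (an assumption, not taken from the file): how `clean_text` and `text_to_sequence` are typically called for a Chinese sentence. It requires the rest of the `text` package plus its dependencies (jieba, pypinyin, cn2an).

```python
# Sketch only: assumes the surrounding `text` package (including text.chinese) and its
# dependencies are installed and importable.
from text.cleaner import clean_text, text_to_sequence

norm_text, phones, tones, word2ph = clean_text("你好,世界!", "ZH")
print(norm_text, phones[:6], tones[:6], word2ph[:3])
print(text_to_sequence("你好,世界!", "ZH"))  # id sequences from cleaned_text_to_sequence
```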
diff --git a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/chinese.py b/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/chinese.py
deleted file mode 100644
index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Jiuxia-Bert-Vits2/text/chinese.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import os
-import re
-
-import cn2an
-from pypinyin import lazy_pinyin, Style
-
-from text import symbols
-from text.symbols import punctuation
-from text.tone_sandhi import ToneSandhi
-
-current_file_path = os.path.dirname(__file__)
-pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in
- open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()}
-
-import jieba.posseg as psg
-
-
-rep_map = {
- ':': ',',
- ';': ',',
- ',': ',',
- '。': '.',
- '!': '!',
- '?': '?',
- '\n': '.',
- "·": ",",
- '、': ",",
- '...': '…',
- '$': '.',
- '“': "'",
- '”': "'",
- '‘': "'",
- '’': "'",
- '(': "'",
- ')': "'",
- '(': "'",
- ')': "'",
- '《': "'",
- '》': "'",
- '【': "'",
- '】': "'",
- '[': "'",
- ']': "'",
- '—': "-",
- '~': "-",
- '~': "-",
- '「': "'",
- '」': "'",
-
-}
-
-tone_modifier = ToneSandhi()
-
-def replace_punctuation(text):
- text = text.replace("嗯", "恩").replace("呣","母")
- pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys()))
-
- replaced_text = pattern.sub(lambda x: rep_map[x.group()], text)
-
- replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text)
-
- return replaced_text
-
-def g2p(text):
- pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation))
- sentences = [i for i in re.split(pattern, text) if i.strip()!='']
- phones, tones, word2ph = _g2p(sentences)
- assert sum(word2ph) == len(phones)
- assert len(word2ph) == len(text)  # Occasionally this assertion fails; wrap it in a try/except if needed.
- phones = ['_'] + phones + ["_"]
- tones = [0] + tones + [0]
- word2ph = [1] + word2ph + [1]
- return phones, tones, word2ph
-
-
-def _get_initials_finals(word):
- initials = []
- finals = []
- orig_initials = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.INITIALS)
- orig_finals = lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for c, v in zip(orig_initials, orig_finals):
- initials.append(c)
- finals.append(v)
- return initials, finals
-
-
-def _g2p(segments):
- phones_list = []
- tones_list = []
- word2ph = []
- for seg in segments:
- pinyins = []
- # Replace all English words in the sentence
- seg = re.sub('[a-zA-Z]+', '', seg)
- seg_cut = psg.lcut(seg)
- initials = []
- finals = []
- seg_cut = tone_modifier.pre_merge_for_modify(seg_cut)
- for word, pos in seg_cut:
- if pos == 'eng':
- continue
- sub_initials, sub_finals = _get_initials_finals(word)
- sub_finals = tone_modifier.modified_tone(word, pos,
- sub_finals)
- initials.append(sub_initials)
- finals.append(sub_finals)
-
- # assert len(sub_initials) == len(sub_finals) == len(word)
- initials = sum(initials, [])
- finals = sum(finals, [])
- #
- for c, v in zip(initials, finals):
- raw_pinyin = c+v
- # NOTE: post process for pypinyin outputs
- # we discriminate i, ii and iii
- if c == v:
- assert c in punctuation
- phone = [c]
- tone = '0'
- word2ph.append(1)
- else:
- v_without_tone = v[:-1]
- tone = v[-1]
-
- pinyin = c+v_without_tone
- assert tone in '12345'
-
- if c:
- # multi-syllable
- v_rep_map = {
- "uei": 'ui',
- 'iou': 'iu',
- 'uen': 'un',
- }
- if v_without_tone in v_rep_map.keys():
- pinyin = c+v_rep_map[v_without_tone]
- else:
- # 单音节
- pinyin_rep_map = {
- 'ing': 'ying',
- 'i': 'yi',
- 'in': 'yin',
- 'u': 'wu',
- }
- if pinyin in pinyin_rep_map.keys():
- pinyin = pinyin_rep_map[pinyin]
- else:
- single_rep_map = {
- 'v': 'yu',
- 'e': 'e',
- 'i': 'y',
- 'u': 'w',
- }
- if pinyin[0] in single_rep_map.keys():
- pinyin = single_rep_map[pinyin[0]]+pinyin[1:]
-
- assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin)
- phone = pinyin_to_symbol_map[pinyin].split(' ')
- word2ph.append(len(phone))
-
- phones_list += phone
- tones_list += [int(tone)] * len(phone)
- return phones_list, tones_list, word2ph
-
-
-
-def text_normalize(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- text = replace_punctuation(text)
- return text
-
-def get_bert_feature(text, word2ph):
- from text import chinese_bert
- return chinese_bert.get_bert_feature(text, word2ph)
-
-if __name__ == '__main__':
- from text.chinese_bert import get_bert_feature
- text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏"
- text = text_normalize(text)
- print(text)
- phones, tones, word2ph = g2p(text)
- bert = get_bert_feature(text, word2ph)
-
- print(phones, tones, word2ph, bert.shape)
-
-
- # # Example usage
- # text = "这是一个示例文本:,你好!这是一个测试...."
- # print(g2p_paddle(text)) # output: 这是一个示例文本你好这是一个测试
diff --git a/spaces/digitalxingtong/Nanami-Bert-VITS2/transcribe_genshin.py b/spaces/digitalxingtong/Nanami-Bert-VITS2/transcribe_genshin.py
deleted file mode 100644
index acc98814af6189d129ab85946525bec55419a33f..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Nanami-Bert-VITS2/transcribe_genshin.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# coding=gbk
-import os
-import argparse
-import librosa
-import numpy as np
-from multiprocessing import Pool, cpu_count
-
-import soundfile
-from scipy.io import wavfile
-from tqdm import tqdm
-
-global speaker_annos
-speaker_annos = []
-
-def process(item):
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- if os.path.exists(wav_path) and '.wav' in wav_path:
- os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True)
- wav, sr = librosa.load(wav_path, sr=args.sr)
- soundfile.write(
- os.path.join(args.out_dir, speaker, wav_name),
- wav,
- sr
- )
-
-def process_text(item):
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- global speaker_annos
- tr_name = wav_name.replace('.wav', '')
- with open(args.out_dir+'/'+speaker+'/'+tr_name+'.lab', "r", encoding="utf-8") as file:
- text = file.read()
- text = text.replace("{NICKNAME}",'')
- text = text.replace("{M#}{F#}",'')
- text = text.replace("{M#}{F#}",'')
- substring = "{M#}{F#}"
- if substring in text:
- if tr_name.endswith("a"):
- text = text.replace("{M#}{F#}",'')
- if tr_name.endswith("b"):
- text = text.replace("{M#}{F#}",'')
- text = text.replace("#",'')
- text = "ZH|" + text + "\n" #
- speaker_annos.append(args.out_dir+'/'+speaker+'/'+wav_name+ "|" + speaker + "|" + text)
-
-
-
-if __name__ == "__main__":
- parent_dir = "./genshin_dataset/"
- speaker_names = list(os.walk(parent_dir))[0][1]
- parser = argparse.ArgumentParser()
- parser.add_argument("--sr", type=int, default=44100, help="sampling rate")
- parser.add_argument("--in_dir", type=str, default="./genshin_dataset", help="path to source dir")
- parser.add_argument("--out_dir", type=str, default="./genshin_dataset", help="path to target dir")
- args = parser.parse_args()
- # processs = 8
- processs = cpu_count()-2 if cpu_count() >4 else 1
- pool = Pool(processes=processs)
-
- for speaker in os.listdir(args.in_dir):
- spk_dir = os.path.join(args.in_dir, speaker)
- if os.path.isdir(spk_dir):
- print(spk_dir)
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
- pass
- for i in os.listdir(spk_dir):
- if i.endswith("wav"):
- pro=(spk_dir, i, args)
- process_text(pro)
- if len(speaker_annos) == 0:
- print("transcribe error!!!")
- with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f:
- for line in speaker_annos:
- f.write(line)
- print("transcript file finished.")
diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/attentions.py b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/attentions.py
deleted file mode 100644
index ecbdbc8be941a962046fc11fd6739b093112123e..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/attentions.py
+++ /dev/null
@@ -1,343 +0,0 @@
-import copy
-import math
-import numpy as np
-import torch
-from torch import nn
-from torch.nn import functional as F
-
-import commons
-import modules
-from torch.nn.utils import weight_norm, remove_weight_norm
-class LayerNorm(nn.Module):
- def __init__(self, channels, eps=1e-5):
- super().__init__()
- self.channels = channels
- self.eps = eps
-
- self.gamma = nn.Parameter(torch.ones(channels))
- self.beta = nn.Parameter(torch.zeros(channels))
-
- def forward(self, x):
- x = x.transpose(1, -1)
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
- return x.transpose(1, -1)
-
-
-
-@torch.jit.script
-def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
- n_channels_int = n_channels[0]
- in_act = input_a + input_b
- t_act = torch.tanh(in_act[:, :n_channels_int, :])
- s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
- acts = t_act * s_act
- return acts
-
-class Encoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.window_size = window_size
- if isflow:
- cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1)
- self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1)
- self.cond_layer = weight_norm(cond_layer, name='weight')
- self.gin_channels = 256
- self.cond_layer_idx = self.n_layers
- if 'gin_channels' in kwargs:
- self.gin_channels = kwargs['gin_channels']
- if self.gin_channels != 0:
- self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels)
- # vits2 says 3rd block, so idx is 2 by default
- self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2
- print(self.gin_channels, self.cond_layer_idx)
- assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers'
- self.drop = nn.Dropout(p_dropout)
- self.attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
- def forward(self, x, x_mask, g=None):
- attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- if i == self.cond_layer_idx and g is not None:
- g = self.spk_emb_linear(g.transpose(1, 2))
- g = g.transpose(1, 2)
- x = x + g
- x = x * x_mask
- y = self.attn_layers[i](x, x, attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class Decoder(nn.Module):
- def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
- super().__init__()
- self.hidden_channels = hidden_channels
- self.filter_channels = filter_channels
- self.n_heads = n_heads
- self.n_layers = n_layers
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
-
- self.drop = nn.Dropout(p_dropout)
- self.self_attn_layers = nn.ModuleList()
- self.norm_layers_0 = nn.ModuleList()
- self.encdec_attn_layers = nn.ModuleList()
- self.norm_layers_1 = nn.ModuleList()
- self.ffn_layers = nn.ModuleList()
- self.norm_layers_2 = nn.ModuleList()
- for i in range(self.n_layers):
- self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
- self.norm_layers_0.append(LayerNorm(hidden_channels))
- self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
- self.norm_layers_1.append(LayerNorm(hidden_channels))
- self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
- self.norm_layers_2.append(LayerNorm(hidden_channels))
-
- def forward(self, x, x_mask, h, h_mask):
- """
- x: decoder input
- h: encoder output
- """
- self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
- encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
- x = x * x_mask
- for i in range(self.n_layers):
- y = self.self_attn_layers[i](x, x, self_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_0[i](x + y)
-
- y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
- y = self.drop(y)
- x = self.norm_layers_1[i](x + y)
-
- y = self.ffn_layers[i](x, x_mask)
- y = self.drop(y)
- x = self.norm_layers_2[i](x + y)
- x = x * x_mask
- return x
-
-
-class MultiHeadAttention(nn.Module):
- def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
- super().__init__()
- assert channels % n_heads == 0
-
- self.channels = channels
- self.out_channels = out_channels
- self.n_heads = n_heads
- self.p_dropout = p_dropout
- self.window_size = window_size
- self.heads_share = heads_share
- self.block_length = block_length
- self.proximal_bias = proximal_bias
- self.proximal_init = proximal_init
- self.attn = None
-
- self.k_channels = channels // n_heads
- self.conv_q = nn.Conv1d(channels, channels, 1)
- self.conv_k = nn.Conv1d(channels, channels, 1)
- self.conv_v = nn.Conv1d(channels, channels, 1)
- self.conv_o = nn.Conv1d(channels, out_channels, 1)
- self.drop = nn.Dropout(p_dropout)
-
- if window_size is not None:
- n_heads_rel = 1 if heads_share else n_heads
- rel_stddev = self.k_channels**-0.5
- self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
- self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
-
- nn.init.xavier_uniform_(self.conv_q.weight)
- nn.init.xavier_uniform_(self.conv_k.weight)
- nn.init.xavier_uniform_(self.conv_v.weight)
- if proximal_init:
- with torch.no_grad():
- self.conv_k.weight.copy_(self.conv_q.weight)
- self.conv_k.bias.copy_(self.conv_q.bias)
-
- def forward(self, x, c, attn_mask=None):
- q = self.conv_q(x)
- k = self.conv_k(c)
- v = self.conv_v(c)
-
- x, self.attn = self.attention(q, k, v, mask=attn_mask)
-
- x = self.conv_o(x)
- return x
-
- def attention(self, query, key, value, mask=None):
- # reshape [b, d, t] -> [b, n_h, t, d_k]
- b, d, t_s, t_t = (*key.size(), query.size(2))
- query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
- value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
-
- scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
- if self.window_size is not None:
- assert t_s == t_t, "Relative attention is only available for self-attention."
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
- rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings)
- scores_local = self._relative_position_to_absolute_position(rel_logits)
- scores = scores + scores_local
- if self.proximal_bias:
- assert t_s == t_t, "Proximal bias is only available for self-attention."
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
- if mask is not None:
- scores = scores.masked_fill(mask == 0, -1e4)
- if self.block_length is not None:
- assert t_s == t_t, "Local attention is only available for self-attention."
- block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
- scores = scores.masked_fill(block_mask == 0, -1e4)
- p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
- p_attn = self.drop(p_attn)
- output = torch.matmul(p_attn, value)
- if self.window_size is not None:
- relative_weights = self._absolute_position_to_relative_position(p_attn)
- value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
- output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
- output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
- return output, p_attn
-
- def _matmul_with_relative_values(self, x, y):
- """
- x: [b, h, l, m]
- y: [h or 1, m, d]
- ret: [b, h, l, d]
- """
- ret = torch.matmul(x, y.unsqueeze(0))
- return ret
-
- def _matmul_with_relative_keys(self, x, y):
- """
- x: [b, h, l, d]
- y: [h or 1, m, d]
- ret: [b, h, l, m]
- """
- ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
- return ret
-
- def _get_relative_embeddings(self, relative_embeddings, length):
- max_relative_position = 2 * self.window_size + 1
- # Pad first before slice to avoid using cond ops.
- pad_length = max(length - (self.window_size + 1), 0)
- slice_start_position = max((self.window_size + 1) - length, 0)
- slice_end_position = slice_start_position + 2 * length - 1
- if pad_length > 0:
- padded_relative_embeddings = F.pad(
- relative_embeddings,
- commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
- else:
- padded_relative_embeddings = relative_embeddings
- used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position]
- return used_relative_embeddings
-
- def _relative_position_to_absolute_position(self, x):
- """
- x: [b, h, l, 2*l-1]
- ret: [b, h, l, l]
- """
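-    # The pad/flatten/reshape/slice sequence below converts relative-position logits
-    # of width 2*l-1 (one row per query position) into an absolute [l, l] score
-    # matrix without any gather or conditional ops.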
- batch, heads, length, _ = x.size()
- # Concat columns of pad to shift from relative to absolute indexing.
- x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]))
-
-    # Concat extra elements so as to add up to shape (len+1, 2*len-1).
- x_flat = x.view([batch, heads, length * 2 * length])
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]))
-
- # Reshape and slice out the padded elements.
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:]
- return x_final
-
- def _absolute_position_to_relative_position(self, x):
- """
- x: [b, h, l, l]
- ret: [b, h, l, 2*l-1]
- """
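-    # Inverse of the transform above: right-pad each row, shift with a flat left-pad,
-    # and reshape so every score is re-indexed relative to its own query position.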
- batch, heads, length, _ = x.size()
-    # pad along column
- x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]))
- x_flat = x.view([batch, heads, length**2 + length*(length -1)])
- # add 0's in the beginning that will skew the elements after reshape
- x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
- x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:]
- return x_final
-
- def _attention_bias_proximal(self, length):
- """Bias for self-attention to encourage attention to close positions.
- Args:
- length: an integer scalar.
- Returns:
- a Tensor with shape [1, 1, length, length]
- """
- r = torch.arange(length, dtype=torch.float32)
- diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
- return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
-
-
-class FFN(nn.Module):
- def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
- super().__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.filter_channels = filter_channels
- self.kernel_size = kernel_size
- self.p_dropout = p_dropout
- self.activation = activation
- self.causal = causal
-
- if causal:
- self.padding = self._causal_padding
- else:
- self.padding = self._same_padding
-
- self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
- self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
- self.drop = nn.Dropout(p_dropout)
-
- def forward(self, x, x_mask):
- x = self.conv_1(self.padding(x * x_mask))
- if self.activation == "gelu":
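-      # sigmoid-based approximation of GELU: gelu(x) ~= x * sigmoid(1.702 * x)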
- x = x * torch.sigmoid(1.702 * x)
- else:
- x = torch.relu(x)
- x = self.drop(x)
- x = self.conv_2(self.padding(x * x_mask))
- return x * x_mask
-
- def _causal_padding(self, x):
- if self.kernel_size == 1:
- return x
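-    # left-pad only, so each output frame depends on current and past frames alone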
- pad_l = self.kernel_size - 1
- pad_r = 0
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
-
- def _same_padding(self, x):
- if self.kernel_size == 1:
- return x
- pad_l = (self.kernel_size - 1) // 2
- pad_r = self.kernel_size // 2
- padding = [[0, 0], [0, 0], [pad_l, pad_r]]
- x = F.pad(x, commons.convert_pad_shape(padding))
- return x
diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/text/tone_sandhi.py b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/text/tone_sandhi.py
deleted file mode 100644
index 0f45b7a72c5d858bcaab19ac85cfa686bf9a74da..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/text/tone_sandhi.py
+++ /dev/null
@@ -1,351 +0,0 @@
-# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from typing import List
-from typing import Tuple
-
-import jieba
-from pypinyin import lazy_pinyin
-from pypinyin import Style
-
-
-class ToneSandhi():
- def __init__(self):
- self.must_neural_tone_words = {
- '麻烦', '麻利', '鸳鸯', '高粱', '骨头', '骆驼', '马虎', '首饰', '馒头', '馄饨', '风筝',
- '难为', '队伍', '阔气', '闺女', '门道', '锄头', '铺盖', '铃铛', '铁匠', '钥匙', '里脊',
- '里头', '部分', '那么', '道士', '造化', '迷糊', '连累', '这么', '这个', '运气', '过去',
- '软和', '转悠', '踏实', '跳蚤', '跟头', '趔趄', '财主', '豆腐', '讲究', '记性', '记号',
- '认识', '规矩', '见识', '裁缝', '补丁', '衣裳', '衣服', '衙门', '街坊', '行李', '行当',
- '蛤蟆', '蘑菇', '薄荷', '葫芦', '葡萄', '萝卜', '荸荠', '苗条', '苗头', '苍蝇', '芝麻',
- '舒服', '舒坦', '舌头', '自在', '膏药', '脾气', '脑袋', '脊梁', '能耐', '胳膊', '胭脂',
- '胡萝', '胡琴', '胡同', '聪明', '耽误', '耽搁', '耷拉', '耳朵', '老爷', '老实', '老婆',
- '老头', '老太', '翻腾', '罗嗦', '罐头', '编辑', '结实', '红火', '累赘', '糨糊', '糊涂',
- '精神', '粮食', '簸箕', '篱笆', '算计', '算盘', '答应', '笤帚', '笑语', '笑话', '窟窿',
- '窝囊', '窗户', '稳当', '稀罕', '称呼', '秧歌', '秀气', '秀才', '福气', '祖宗', '砚台',
- '码头', '石榴', '石头', '石匠', '知识', '眼睛', '眯缝', '眨巴', '眉毛', '相声', '盘算',
- '白净', '痢疾', '痛快', '疟疾', '疙瘩', '疏忽', '畜生', '生意', '甘蔗', '琵琶', '琢磨',
- '琉璃', '玻璃', '玫瑰', '玄乎', '狐狸', '状元', '特务', '牲口', '牙碜', '牌楼', '爽快',
- '爱人', '热闹', '烧饼', '烟筒', '烂糊', '点心', '炊帚', '灯笼', '火候', '漂亮', '滑溜',
- '溜达', '温和', '清楚', '消息', '浪头', '活泼', '比方', '正经', '欺负', '模糊', '槟榔',
- '棺材', '棒槌', '棉花', '核桃', '栅栏', '柴火', '架势', '枕头', '枇杷', '机灵', '本事',
- '木头', '木匠', '朋友', '月饼', '月亮', '暖和', '明白', '时候', '新鲜', '故事', '收拾',
- '收成', '提防', '挖苦', '挑剔', '指甲', '指头', '拾掇', '拳头', '拨弄', '招牌', '招呼',
- '抬举', '护士', '折腾', '扫帚', '打量', '打算', '打点', '打扮', '打听', '打发', '扎实',
- '扁担', '戒指', '懒得', '意识', '意思', '情形', '悟性', '怪物', '思量', '怎么', '念头',
- '念叨', '快活', '忙活', '志气', '心思', '得罪', '张罗', '弟兄', '开通', '应酬', '庄稼',
- '干事', '帮手', '帐篷', '希罕', '师父', '师傅', '巴结', '巴掌', '差事', '工夫', '岁数',
- '屁股', '尾巴', '少爷', '小气', '小伙', '将就', '对头', '对付', '寡妇', '家伙', '客气',
- '实在', '官司', '学问', '学生', '字号', '嫁妆', '媳妇', '媒人', '婆家', '娘家', '委屈',
- '姑娘', '姐夫', '妯娌', '妥当', '妖精', '奴才', '女婿', '头发', '太阳', '大爷', '大方',
- '大意', '大夫', '多少', '多么', '外甥', '壮实', '地道', '地方', '在乎', '困难', '嘴巴',
- '嘱咐', '嘟囔', '嘀咕', '喜欢', '喇嘛', '喇叭', '商量', '唾沫', '哑巴', '哈欠', '哆嗦',
- '咳嗽', '和尚', '告诉', '告示', '含糊', '吓唬', '后头', '名字', '名堂', '合同', '吆喝',
- '叫唤', '口袋', '厚道', '厉害', '千斤', '包袱', '包涵', '匀称', '勤快', '动静', '动弹',
- '功夫', '力气', '前头', '刺猬', '刺激', '别扭', '利落', '利索', '利害', '分析', '出息',
- '凑合', '凉快', '冷战', '冤枉', '冒失', '养活', '关系', '先生', '兄弟', '便宜', '使唤',
- '佩服', '作坊', '体面', '位置', '似的', '伙计', '休息', '什么', '人家', '亲戚', '亲家',
- '交情', '云彩', '事情', '买卖', '主意', '丫头', '丧气', '两口', '东西', '东家', '世故',
- '不由', '不在', '下水', '下巴', '上头', '上司', '丈夫', '丈人', '一辈', '那个', '菩萨',
- '父亲', '母亲', '咕噜', '邋遢', '费用', '冤家', '甜头', '介绍', '荒唐', '大人', '泥鳅',
- '幸福', '熟悉', '计划', '扑腾', '蜡烛', '姥爷', '照顾', '喉咙', '吉他', '弄堂', '蚂蚱',
- '凤凰', '拖沓', '寒碜', '糟蹋', '倒腾', '报复', '逻辑', '盘缠', '喽啰', '牢骚', '咖喱',
- '扫把', '惦记'
- }
- self.must_not_neural_tone_words = {
- "男子", "女子", "分子", "原子", "量子", "莲子", "石子", "瓜子", "电子", "人人", "虎虎"
- }
- self.punc = ":,;。?!“”‘’':,;.?!"
-
- # the meaning of jieba pos tag: https://blog.csdn.net/weixin_44174352/article/details/113731041
- # e.g.
- # word: "家里"
- # pos: "s"
- # finals: ['ia1', 'i3']
- def _neural_sandhi(self, word: str, pos: str,
- finals: List[str]) -> List[str]:
-
- # reduplication words for n. and v. e.g. 奶奶, 试试, 旺旺
- for j, item in enumerate(word):
- if j - 1 >= 0 and item == word[j - 1] and pos[0] in {
- "n", "v", "a"
- } and word not in self.must_not_neural_tone_words:
- finals[j] = finals[j][:-1] + "5"
- ge_idx = word.find("个")
- if len(word) >= 1 and word[-1] in "吧呢啊呐噻嘛吖嗨呐哦哒额滴哩哟喽啰耶喔诶":
- finals[-1] = finals[-1][:-1] + "5"
- elif len(word) >= 1 and word[-1] in "的地得":
- finals[-1] = finals[-1][:-1] + "5"
- # e.g. 走了, 看着, 去过
- # elif len(word) == 1 and word in "了着过" and pos in {"ul", "uz", "ug"}:
- # finals[-1] = finals[-1][:-1] + "5"
- elif len(word) > 1 and word[-1] in "们子" and pos in {
- "r", "n"
- } and word not in self.must_not_neural_tone_words:
- finals[-1] = finals[-1][:-1] + "5"
- # e.g. 桌上, 地下, 家里
- elif len(word) > 1 and word[-1] in "上下里" and pos in {"s", "l", "f"}:
- finals[-1] = finals[-1][:-1] + "5"
- # e.g. 上来, 下去
- elif len(word) > 1 and word[-1] in "来去" and word[-2] in "上下进出回过起开":
- finals[-1] = finals[-1][:-1] + "5"
-        # "个" used as a measure word
- elif (ge_idx >= 1 and
- (word[ge_idx - 1].isnumeric() or
- word[ge_idx - 1] in "几有两半多各整每做是")) or word == '个':
- finals[ge_idx] = finals[ge_idx][:-1] + "5"
- else:
- if word in self.must_neural_tone_words or word[
- -2:] in self.must_neural_tone_words:
- finals[-1] = finals[-1][:-1] + "5"
-
- word_list = self._split_word(word)
- finals_list = [finals[:len(word_list[0])], finals[len(word_list[0]):]]
- for i, word in enumerate(word_list):
-            # conventional neutral-tone words in Chinese
- if word in self.must_neural_tone_words or word[
- -2:] in self.must_neural_tone_words:
- finals_list[i][-1] = finals_list[i][-1][:-1] + "5"
- finals = sum(finals_list, [])
- return finals
-
- def _bu_sandhi(self, word: str, finals: List[str]) -> List[str]:
- # e.g. 看不懂
- if len(word) == 3 and word[1] == "不":
- finals[1] = finals[1][:-1] + "5"
- else:
- for i, char in enumerate(word):
- # "不" before tone4 should be bu2, e.g. 不怕
- if char == "不" and i + 1 < len(word) and finals[i +
- 1][-1] == "4":
- finals[i] = finals[i][:-1] + "2"
- return finals
-
- def _yi_sandhi(self, word: str, finals: List[str]) -> List[str]:
- # "一" in number sequences, e.g. 一零零, 二一零
- if word.find("一") != -1 and all(
- [item.isnumeric() for item in word if item != "一"]):
- return finals
-        # "一" between reduplication words should be yi5, e.g. 看一看
- elif len(word) == 3 and word[1] == "一" and word[0] == word[-1]:
- finals[1] = finals[1][:-1] + "5"
- # when "一" is ordinal word, it should be yi1
- elif word.startswith("第一"):
- finals[1] = finals[1][:-1] + "1"
- else:
- for i, char in enumerate(word):
- if char == "一" and i + 1 < len(word):
- # "一" before tone4 should be yi2, e.g. 一段
- if finals[i + 1][-1] == "4":
- finals[i] = finals[i][:-1] + "2"
- # "一" before non-tone4 should be yi4, e.g. 一天
- else:
-                        # if "一" is followed by punctuation, it is still read as tone 1
- if word[i + 1] not in self.punc:
- finals[i] = finals[i][:-1] + "4"
- return finals
-
- def _split_word(self, word: str) -> List[str]:
- word_list = jieba.cut_for_search(word)
- word_list = sorted(word_list, key=lambda i: len(i), reverse=False)
- first_subword = word_list[0]
- first_begin_idx = word.find(first_subword)
- if first_begin_idx == 0:
- second_subword = word[len(first_subword):]
- new_word_list = [first_subword, second_subword]
- else:
- second_subword = word[:-len(first_subword)]
- new_word_list = [second_subword, first_subword]
- return new_word_list
-
- def _three_sandhi(self, word: str, finals: List[str]) -> List[str]:
- if len(word) == 2 and self._all_tone_three(finals):
- finals[0] = finals[0][:-1] + "2"
- elif len(word) == 3:
- word_list = self._split_word(word)
- if self._all_tone_three(finals):
- # disyllabic + monosyllabic, e.g. 蒙古/包
- if len(word_list[0]) == 2:
- finals[0] = finals[0][:-1] + "2"
- finals[1] = finals[1][:-1] + "2"
- # monosyllabic + disyllabic, e.g. 纸/老虎
- elif len(word_list[0]) == 1:
- finals[1] = finals[1][:-1] + "2"
- else:
- finals_list = [
- finals[:len(word_list[0])], finals[len(word_list[0]):]
- ]
- if len(finals_list) == 2:
- for i, sub in enumerate(finals_list):
- # e.g. 所有/人
- if self._all_tone_three(sub) and len(sub) == 2:
- finals_list[i][0] = finals_list[i][0][:-1] + "2"
- # e.g. 好/喜欢
- elif i == 1 and not self._all_tone_three(sub) and finals_list[i][0][-1] == "3" and \
- finals_list[0][-1][-1] == "3":
-
- finals_list[0][-1] = finals_list[0][-1][:-1] + "2"
- finals = sum(finals_list, [])
-        # split the idiom into two words whose length is 2
- elif len(word) == 4:
- finals_list = [finals[:2], finals[2:]]
- finals = []
- for sub in finals_list:
- if self._all_tone_three(sub):
- sub[0] = sub[0][:-1] + "2"
- finals += sub
-
- return finals
-
- def _all_tone_three(self, finals: List[str]) -> bool:
- return all(x[-1] == "3" for x in finals)
-
- # merge "不" and the word behind it
-    # if we don't merge, "不" sometimes appears alone according to jieba, which may cause sandhi errors
- def _merge_bu(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
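-        # e.g. [('不', 'd'), ('好', 'a')] -> [('不好', 'a')]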
- new_seg = []
- last_word = ""
- for word, pos in seg:
- if last_word == "不":
- word = last_word + word
- if word != "不":
- new_seg.append((word, pos))
- last_word = word[:]
- if last_word == "不":
- new_seg.append((last_word, 'd'))
- last_word = ""
- return new_seg
-
-    # function 1: merge "一" and the reduplication words on its left and right, e.g. "听","一","听" -> "听一听"
-    # function 2: merge a single "一" and the word behind it
-    # if we don't merge, "一" sometimes appears alone according to jieba, which may cause sandhi errors
- # e.g.
- # input seg: [('听', 'v'), ('一', 'm'), ('听', 'v')]
- # output seg: [['听一听', 'v']]
- def _merge_yi(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- new_seg = []
- # function 1
- for i, (word, pos) in enumerate(seg):
- if i - 1 >= 0 and word == "一" and i + 1 < len(seg) and seg[i - 1][
- 0] == seg[i + 1][0] and seg[i - 1][1] == "v":
- new_seg[i - 1][0] = new_seg[i - 1][0] + "一" + new_seg[i - 1][0]
- else:
- if i - 2 >= 0 and seg[i - 1][0] == "一" and seg[i - 2][
- 0] == word and pos == "v":
- continue
- else:
- new_seg.append([word, pos])
- seg = new_seg
- new_seg = []
- # function 2
- for i, (word, pos) in enumerate(seg):
- if new_seg and new_seg[-1][0] == "一":
- new_seg[-1][0] = new_seg[-1][0] + word
- else:
- new_seg.append([word, pos])
- return new_seg
-
- # the first and the second words are all_tone_three
- def _merge_continuous_three_tones(
- self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- new_seg = []
- sub_finals_list = [
- lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for (word, pos) in seg
- ]
- assert len(sub_finals_list) == len(seg)
- merge_last = [False] * len(seg)
- for i, (word, pos) in enumerate(seg):
- if i - 1 >= 0 and self._all_tone_three(
- sub_finals_list[i - 1]) and self._all_tone_three(
- sub_finals_list[i]) and not merge_last[i - 1]:
-                # if the last word is a reduplication, do not merge, because reduplication needs to go through _neural_sandhi
- if not self._is_reduplication(seg[i - 1][0]) and len(
- seg[i - 1][0]) + len(seg[i][0]) <= 3:
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
- merge_last[i] = True
- else:
- new_seg.append([word, pos])
- else:
- new_seg.append([word, pos])
-
- return new_seg
-
- def _is_reduplication(self, word: str) -> bool:
- return len(word) == 2 and word[0] == word[1]
-
-    # the last char of the first word and the first char of the second word are tone_three
- def _merge_continuous_three_tones_2(
- self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- new_seg = []
- sub_finals_list = [
- lazy_pinyin(
- word, neutral_tone_with_five=True, style=Style.FINALS_TONE3)
- for (word, pos) in seg
- ]
- assert len(sub_finals_list) == len(seg)
- merge_last = [False] * len(seg)
- for i, (word, pos) in enumerate(seg):
- if i - 1 >= 0 and sub_finals_list[i - 1][-1][-1] == "3" and sub_finals_list[i][0][-1] == "3" and not \
- merge_last[i - 1]:
-                # if the last word is a reduplication, do not merge, because reduplication needs to go through _neural_sandhi
- if not self._is_reduplication(seg[i - 1][0]) and len(
- seg[i - 1][0]) + len(seg[i][0]) <= 3:
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
- merge_last[i] = True
- else:
- new_seg.append([word, pos])
- else:
- new_seg.append([word, pos])
- return new_seg
-
- def _merge_er(self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
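-        # e.g. [('花', 'n'), ('儿', 'n')] -> [('花儿', 'n')]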
- new_seg = []
- for i, (word, pos) in enumerate(seg):
- if i - 1 >= 0 and word == "儿" and seg[i-1][0] != "#":
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
- else:
- new_seg.append([word, pos])
- return new_seg
-
- def _merge_reduplication(
- self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- new_seg = []
- for i, (word, pos) in enumerate(seg):
- if new_seg and word == new_seg[-1][0]:
- new_seg[-1][0] = new_seg[-1][0] + seg[i][0]
- else:
- new_seg.append([word, pos])
- return new_seg
-
- def pre_merge_for_modify(
- self, seg: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
- seg = self._merge_bu(seg)
- try:
- seg = self._merge_yi(seg)
-        except Exception:
- print("_merge_yi failed")
- seg = self._merge_reduplication(seg)
- seg = self._merge_continuous_three_tones(seg)
- seg = self._merge_continuous_three_tones_2(seg)
- seg = self._merge_er(seg)
- return seg
-
- def modified_tone(self, word: str, pos: str,
- finals: List[str]) -> List[str]:
- finals = self._bu_sandhi(word, finals)
- finals = self._yi_sandhi(word, finals)
- finals = self._neural_sandhi(word, pos, finals)
- finals = self._three_sandhi(word, finals)
- return finals
diff --git a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transcribe_genshin.py b/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transcribe_genshin.py
deleted file mode 100644
index acc98814af6189d129ab85946525bec55419a33f..0000000000000000000000000000000000000000
--- a/spaces/digitalxingtong/Xingtong-Read-Bert-VITS2/transcribe_genshin.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# coding=gbk
-import os
-import argparse
-import librosa
-import numpy as np
-from multiprocessing import Pool, cpu_count
-
-import soundfile
-from scipy.io import wavfile
-from tqdm import tqdm
-
-global speaker_annos
-speaker_annos = []
-
-def process(item):
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- if os.path.exists(wav_path) and '.wav' in wav_path:
- os.makedirs(os.path.join(args.out_dir, speaker), exist_ok=True)
- wav, sr = librosa.load(wav_path, sr=args.sr)
- soundfile.write(
- os.path.join(args.out_dir, speaker, wav_name),
- wav,
- sr
- )
-
-def process_text(item):
- spkdir, wav_name, args = item
- speaker = spkdir.replace("\\", "/").split("/")[-1]
- wav_path = os.path.join(args.in_dir, speaker, wav_name)
- global speaker_annos
- tr_name = wav_name.replace('.wav', '')
- with open(args.out_dir+'/'+speaker+'/'+tr_name+'.lab', "r", encoding="utf-8") as file:
- text = file.read()
- text = text.replace("{NICKNAME}",'')
- text = text.replace("{M#}{F#}",'')
- text = text.replace("{M#}{F#}",'')
- substring = "{M#}{F#}"
- if substring in text:
- if tr_name.endswith("a"):
- text = text.replace("{M#}{F#}",'')
- if tr_name.endswith("b"):
- text = text.replace("{M#}{F#}",'')
- text = text.replace("#",'')
- text = "ZH|" + text + "\n" #
- speaker_annos.append(args.out_dir+'/'+speaker+'/'+wav_name+ "|" + speaker + "|" + text)
-
-
-
-if __name__ == "__main__":
- parent_dir = "./genshin_dataset/"
- speaker_names = list(os.walk(parent_dir))[0][1]
- parser = argparse.ArgumentParser()
- parser.add_argument("--sr", type=int, default=44100, help="sampling rate")
- parser.add_argument("--in_dir", type=str, default="./genshin_dataset", help="path to source dir")
- parser.add_argument("--out_dir", type=str, default="./genshin_dataset", help="path to target dir")
- args = parser.parse_args()
- # processs = 8
- processs = cpu_count()-2 if cpu_count() >4 else 1
- pool = Pool(processes=processs)
-
- for speaker in os.listdir(args.in_dir):
- spk_dir = os.path.join(args.in_dir, speaker)
- if os.path.isdir(spk_dir):
- print(spk_dir)
- for _ in tqdm(pool.imap_unordered(process, [(spk_dir, i, args) for i in os.listdir(spk_dir) if i.endswith("wav")])):
- pass
- for i in os.listdir(spk_dir):
- if i.endswith("wav"):
- pro=(spk_dir, i, args)
- process_text(pro)
- if len(speaker_annos) == 0:
- print("transcribe error!!!")
- with open("./filelists/short_character_anno.list", 'w', encoding='utf-8') as f:
- for line in speaker_annos:
- f.write(line)
- print("transcript file finished.")
diff --git a/spaces/dineshreddy/WALT/mmdet/models/backbones/detectors_resnext.py b/spaces/dineshreddy/WALT/mmdet/models/backbones/detectors_resnext.py
deleted file mode 100644
index 57d032fe37ed82d5ba24e761bdc014cc0ee5ac64..0000000000000000000000000000000000000000
--- a/spaces/dineshreddy/WALT/mmdet/models/backbones/detectors_resnext.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import math
-
-from mmcv.cnn import build_conv_layer, build_norm_layer
-
-from ..builder import BACKBONES
-from .detectors_resnet import Bottleneck as _Bottleneck
-from .detectors_resnet import DetectoRS_ResNet
-
-
-class Bottleneck(_Bottleneck):
- expansion = 4
-
- def __init__(self,
- inplanes,
- planes,
- groups=1,
- base_width=4,
- base_channels=64,
- **kwargs):
- """Bottleneck block for ResNeXt.
-
- If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
- it is "caffe", the stride-two layer is the first 1x1 conv layer.
- """
- super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
-
- if groups == 1:
- width = self.planes
- else:
- width = math.floor(self.planes *
- (base_width / base_channels)) * groups
-
- self.norm1_name, norm1 = build_norm_layer(
- self.norm_cfg, width, postfix=1)
- self.norm2_name, norm2 = build_norm_layer(
- self.norm_cfg, width, postfix=2)
- self.norm3_name, norm3 = build_norm_layer(
- self.norm_cfg, self.planes * self.expansion, postfix=3)
-
- self.conv1 = build_conv_layer(
- self.conv_cfg,
- self.inplanes,
- width,
- kernel_size=1,
- stride=self.conv1_stride,
- bias=False)
- self.add_module(self.norm1_name, norm1)
- fallback_on_stride = False
- self.with_modulated_dcn = False
- if self.with_dcn:
- fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
- if self.with_sac:
- self.conv2 = build_conv_layer(
- self.sac,
- width,
- width,
- kernel_size=3,
- stride=self.conv2_stride,
- padding=self.dilation,
- dilation=self.dilation,
- groups=groups,
- bias=False)
- elif not self.with_dcn or fallback_on_stride:
- self.conv2 = build_conv_layer(
- self.conv_cfg,
- width,
- width,
- kernel_size=3,
- stride=self.conv2_stride,
- padding=self.dilation,
- dilation=self.dilation,
- groups=groups,
- bias=False)
- else:
- assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
- self.conv2 = build_conv_layer(
- self.dcn,
- width,
- width,
- kernel_size=3,
- stride=self.conv2_stride,
- padding=self.dilation,
- dilation=self.dilation,
- groups=groups,
- bias=False)
-
- self.add_module(self.norm2_name, norm2)
- self.conv3 = build_conv_layer(
- self.conv_cfg,
- width,
- self.planes * self.expansion,
- kernel_size=1,
- bias=False)
- self.add_module(self.norm3_name, norm3)
-
-
-@BACKBONES.register_module()
-class DetectoRS_ResNeXt(DetectoRS_ResNet):
- """ResNeXt backbone for DetectoRS.
-
- Args:
- groups (int): The number of groups in ResNeXt.
- base_width (int): The base width of ResNeXt.
- """
-
- arch_settings = {
- 50: (Bottleneck, (3, 4, 6, 3)),
- 101: (Bottleneck, (3, 4, 23, 3)),
- 152: (Bottleneck, (3, 8, 36, 3))
- }
-
- def __init__(self, groups=1, base_width=4, **kwargs):
- self.groups = groups
- self.base_width = base_width
- super(DetectoRS_ResNeXt, self).__init__(**kwargs)
-
- def make_res_layer(self, **kwargs):
- return super().make_res_layer(
- groups=self.groups,
- base_width=self.base_width,
- base_channels=self.base_channels,
- **kwargs)
diff --git a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_pipelines/master_pipeline.py b/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_pipelines/master_pipeline.py
deleted file mode 100644
index 2071df4f665932dacd4a827e418603996fb562c8..0000000000000000000000000000000000000000
--- a/spaces/dinhminh20521597/OCR_DEMO/configs/_base_/recog_pipelines/master_pipeline.py
+++ /dev/null
@@ -1,42 +0,0 @@
-img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='ResizeOCR',
- height=48,
- min_width=48,
- max_width=160,
- keep_aspect_ratio=True),
- dict(type='ToTensorOCR'),
- dict(type='NormalizeOCR', **img_norm_cfg),
- dict(
- type='Collect',
- keys=['img'],
- meta_keys=[
- 'filename', 'ori_shape', 'img_shape', 'text', 'valid_ratio',
- 'resize_shape'
- ]),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiRotateAugOCR',
- rotate_degrees=[0, 90, 270],
- transforms=[
- dict(
- type='ResizeOCR',
- height=48,
- min_width=48,
- max_width=160,
- keep_aspect_ratio=True),
- dict(type='ToTensorOCR'),
- dict(type='NormalizeOCR', **img_norm_cfg),
- dict(
- type='Collect',
- keys=['img'],
- meta_keys=[
- 'filename', 'ori_shape', 'img_shape', 'valid_ratio',
- 'img_norm_cfg', 'ori_filename', 'resize_shape'
- ]),
- ])
-]
diff --git a/spaces/dmeck/RVC-Speakers/speakers/server/bootstrap/__init__.py b/spaces/dmeck/RVC-Speakers/speakers/server/bootstrap/__init__.py
deleted file mode 100644
index ccaf0c7cc17700a7521df36fcc8bf6f38642baa8..0000000000000000000000000000000000000000
--- a/spaces/dmeck/RVC-Speakers/speakers/server/bootstrap/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from speakers.server.bootstrap.base import Bootstrap
-from speakers.server.bootstrap.runner_bootstrap import RunnerBootstrapBaseWeb
-from speakers.server.bootstrap.bootstrap_register import bootstrap_register
-
-__all__ = [
- "Bootstrap",
- "RunnerBootstrapBaseWeb"
-]
diff --git a/spaces/docparser/Text_Captcha_breaker/app.py b/spaces/docparser/Text_Captcha_breaker/app.py
deleted file mode 100644
index ce0749e6fa457d9bb67171b0557463bbcb03913c..0000000000000000000000000000000000000000
--- a/spaces/docparser/Text_Captcha_breaker/app.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import torch
-import onnx
-import onnxruntime as rt
-from torchvision import transforms as T
-from PIL import Image
-from tokenizer_base import Tokenizer
-import pathlib
-import os
-import gradio as gr
-from huggingface_hub import Repository
-
-repo = Repository(
- local_dir="secret_models",
- repo_type="model",
- clone_from="docparser/captcha",
- token=True
-)
-repo.git_pull()
-
-cwd = pathlib.Path(__file__).parent.resolve()
-model_file = os.path.join(cwd,"secret_models","captcha.onnx")
-img_size = (32,128)
-charset = r"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
-tokenizer_base = Tokenizer(charset)
-
-def get_transform(img_size):
- transforms = []
- transforms.extend([
- T.Resize(img_size, T.InterpolationMode.BICUBIC),
- T.ToTensor(),
- T.Normalize(0.5, 0.5)
- ])
- return T.Compose(transforms)
-
-def to_numpy(tensor):
- return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
-
-def initialize_model(model_file):
- transform = get_transform(img_size)
- # Onnx model loading
- onnx_model = onnx.load(model_file)
- onnx.checker.check_model(onnx_model)
- ort_session = rt.InferenceSession(model_file)
- return transform,ort_session
-
-def get_text(img_org):
- # img_org = Image.open(image_path)
- # Preprocess. Model expects a batch of images with shape: (B, C, H, W)
- x = transform(img_org.convert('RGB')).unsqueeze(0)
-
- # compute ONNX Runtime output prediction
- ort_inputs = {ort_session.get_inputs()[0].name: to_numpy(x)}
- logits = ort_session.run(None, ort_inputs)[0]
- probs = torch.tensor(logits).softmax(-1)
- preds, probs = tokenizer_base.decode(probs)
- preds = preds[0]
- print(preds)
- return preds
-
-transform,ort_session = initialize_model(model_file=model_file)
-
-gr.Interface(
- get_text,
- inputs=gr.Image(type="pil"),
- outputs=gr.outputs.Textbox(),
- title="Text Captcha Reader",
- examples=["8000.png","11JW29.png","2a8486.jpg","2nbcx.png",
- "000679.png","000HU.png","00Uga.png.jpg","00bAQwhAZU.jpg",
- "00h57kYf.jpg","0EoHdtVb.png","0JS21.png","0p98z.png","10010.png"]
-).launch()
-
-# if __name__ == "__main__":
-# image_path = "8000.png"
-# preds,probs = get_text(image_path)
-# print(preds[0])
-
-
diff --git a/spaces/dorkai/text-generation-webui-main/docs/Spell-book.md b/spaces/dorkai/text-generation-webui-main/docs/Spell-book.md
deleted file mode 100644
index 9b7c76c953f76f8a486bbe5156de4e9ebb3f0ec0..0000000000000000000000000000000000000000
--- a/spaces/dorkai/text-generation-webui-main/docs/Spell-book.md
+++ /dev/null
@@ -1,107 +0,0 @@
-You have now entered a hidden corner of the internet.
-
-A confusing yet intriguing realm of paradoxes and contradictions.
-
-A place where you will find out that what you thought you knew, you in fact didn't know, and what you didn't know was in front of you all along.
-
-
-
-*In other words, here I will document little-known facts about this web UI that I could not find another place for in the wiki.*
-
-#### You can train LoRAs in CPU mode
-
-Load the web UI with
-
-```
-python server.py --cpu
-```
-
-and start training the LoRA from the training tab as usual.
-
-#### 8-bit mode works with CPU offloading
-
-```
-python server.py --load-in-8bit --gpu-memory 4000MiB
-```
-
-#### `--pre_layer`, and not `--gpu-memory`, is the right way to do CPU offloading with 4-bit models
-
-```
-python server.py --wbits 4 --groupsize 128 --pre_layer 20
-```
-
-#### Models can be loaded in 32-bit, 16-bit, 8-bit, and 4-bit modes
-
-```
-python server.py --cpu
-python server.py
-python server.py --load-in-8bit
-python server.py --wbits 4
-```
-
-#### The web UI works with any version of GPTQ-for-LLaMa
-
-Including the up-to-date triton and cuda branches. But you have to delete the `repositories/GPTQ-for-LLaMa` folder and reinstall the new one every time:
-
-```
-cd text-generation-webui/repositories
-rm -r GPTQ-for-LLaMa
-pip uninstall quant-cuda
-git clone https://github.com/oobabooga/GPTQ-for-LLaMa -b cuda # or any other repository and branch
-cd GPTQ-for-LLaMa
-python setup_cuda.py install
-```
-
-#### Instruction-following templates are represented as chat characters
-
-https://github.com/oobabooga/text-generation-webui/tree/main/characters/instruction-following
-
-#### The right way to run Alpaca, Open Assistant, Vicuna, etc. is Instruct mode, not normal chat mode
-
-Otherwise the prompt will not be formatted correctly.
-
-1. Start the web UI with
-
-```
-python server.py --chat
-```
-
-2. Click on the "instruct" option under "Chat modes"
-
-3. Select the correct template in the hidden dropdown menu that will become visible.
-
-#### Notebook mode is best mode
-
-Ascended individuals have realized that notebook mode is the superset of chat mode and can do chats with ultimate flexibility, including group chats, editing replies, starting a new bot reply in a given way, and impersonating.
-
-#### RWKV is an RNN
-
-Most models are transformers, but not RWKV, which is an RNN. It's a great model.
-
-#### `--gpu-memory` is not a hard limit on the GPU memory
-
-It is simply a parameter that is passed to the `accelerate` library while loading the model. More memory will be allocated during generation. That's why this parameter has to be set to less than your total GPU memory.
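-
-For example (3500MiB is just an illustrative value, keep it below your card's total VRAM):
-
-```
-python server.py --load-in-8bit --gpu-memory 3500MiB
-```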
-
-#### Contrastive search is perhaps the best preset
-
-But it uses a ton of VRAM.
-
-#### You can check the sha256sum of downloaded models with the download script
-
-```
-python download-model.py facebook/galactica-125m --check
-```
-
-#### The download script continues interrupted downloads by default
-
-It doesn't start over.
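-
-For example, re-running the same command after an interruption simply picks up the partial files:
-
-```
-python download-model.py facebook/galactica-125m
-```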
-
-#### You can download models with multiple threads
-
-```
-python download-model.py facebook/galactica-125m --threads 8
-```
-
-#### LoRAs work in 4-bit mode
-
-You need to follow [these instructions](GPTQ-models-(4-bit-mode).md#using-loras-in-4-bit-mode) and then start the web UI with the `--monkey-patch` flag.
diff --git a/spaces/dragao-elastico/RVC_V2/utils.py b/spaces/dragao-elastico/RVC_V2/utils.py
deleted file mode 100644
index 62be8d03a8e8b839f8747310ef0ec0e82fb8ff0a..0000000000000000000000000000000000000000
--- a/spaces/dragao-elastico/RVC_V2/utils.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import ffmpeg
-import numpy as np
-
-# import praatio
-# import praatio.praat_scripts
-import os
-import sys
-
-import random
-
-import csv
-
-platform_stft_mapping = {
- "linux": "stftpitchshift",
- "darwin": "stftpitchshift",
- "win32": "stftpitchshift.exe",
-}
-
-stft = platform_stft_mapping.get(sys.platform)
-# praatEXE = join('.',os.path.abspath(os.getcwd()) + r"\Praat.exe")
-
-
-def CSVutil(file, rw, type, *args):
- if type == "formanting":
- if rw == "r":
- with open(file) as fileCSVread:
- csv_reader = list(csv.reader(fileCSVread))
- return (
- (csv_reader[0][0], csv_reader[0][1], csv_reader[0][2])
- if csv_reader is not None
- else (lambda: exec('raise ValueError("No data")'))()
- )
- else:
- if args:
- doformnt = args[0]
- else:
- doformnt = False
- qfr = args[1] if len(args) > 1 else 1.0
- tmb = args[2] if len(args) > 2 else 1.0
- with open(file, rw, newline="") as fileCSVwrite:
- csv_writer = csv.writer(fileCSVwrite, delimiter=",")
- csv_writer.writerow([doformnt, qfr, tmb])
- elif type == "stop":
- stop = args[0] if args else False
- with open(file, rw, newline="") as fileCSVwrite:
- csv_writer = csv.writer(fileCSVwrite, delimiter=",")
- csv_writer.writerow([stop])
-
-
-def load_audio(file, sr, DoFormant, Quefrency, Timbre):
- converted = False
- DoFormant, Quefrency, Timbre = CSVutil("csvdb/formanting.csv", "r", "formanting")
- try:
- # https://github.com/openai/whisper/blob/main/whisper/audio.py#L26
- # This launches a subprocess to decode audio while down-mixing and resampling as necessary.
- # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed.
- file = (
- file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
-        )  # strip spaces, quotes, and newlines that users may copy in with the path
- file_formanted = file.strip(" ").strip('"').strip("\n").strip('"').strip(" ")
-
- # print(f"dofor={bool(DoFormant)} timbr={Timbre} quef={Quefrency}\n")
-
- if (
- lambda DoFormant: True
- if DoFormant.lower() == "true"
- else (False if DoFormant.lower() == "false" else DoFormant)
- )(DoFormant):
- numerator = round(random.uniform(1, 4), 4)
- # os.system(f"stftpitchshift -i {file} -q {Quefrency} -t {Timbre} -o {file_formanted}")
- # print('stftpitchshift -i "%s" -p 1.0 --rms -w 128 -v 8 -q %s -t %s -o "%s"' % (file, Quefrency, Timbre, file_formanted))
-
- if not file.endswith(".wav"):
- if not os.path.isfile(f"{file_formanted}.wav"):
- converted = True
- # print(f"\nfile = {file}\n")
- # print(f"\nfile_formanted = {file_formanted}\n")
- converting = (
- ffmpeg.input(file_formanted, threads=0)
- .output(f"{file_formanted}.wav")
- .run(
- cmd=["ffmpeg", "-nostdin"],
- capture_stdout=True,
- capture_stderr=True,
- )
- )
- else:
- pass
-
- file_formanted = (
- f"{file_formanted}.wav"
- if not file_formanted.endswith(".wav")
- else file_formanted
- )
-
- print(f" · Formanting {file_formanted}...\n")
-
- os.system(
- '%s -i "%s" -q "%s" -t "%s" -o "%sFORMANTED_%s.wav"'
- % (
- stft,
- file_formanted,
- Quefrency,
- Timbre,
- file_formanted,
- str(numerator),
- )
- )
-
- print(f" · Formanted {file_formanted}!\n")
-
- # filepraat = (os.path.abspath(os.getcwd()) + '\\' + file).replace('/','\\')
- # file_formantedpraat = ('"' + os.path.abspath(os.getcwd()) + '/' + 'formanted'.join(file_formanted) + '"').replace('/','\\')
- # print("%sFORMANTED_%s.wav" % (file_formanted, str(numerator)))
-
- out, _ = (
- ffmpeg.input(
- "%sFORMANTED_%s.wav" % (file_formanted, str(numerator)), threads=0
- )
- .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
- .run(
- cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True
- )
- )
-
-            try:
-                os.remove("%sFORMANTED_%s.wav" % (file_formanted, str(numerator)))
-            except Exception:
-                print("couldn't remove formanted type of file")
-
- else:
- out, _ = (
- ffmpeg.input(file, threads=0)
- .output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sr)
- .run(
- cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True
- )
- )
- except Exception as e:
- raise RuntimeError(f"Failed to load audio: {e}")
-
- if converted:
-        try:
-            os.remove(file_formanted)
-        except Exception:
-            print("couldn't remove converted type of file")
- converted = False
-
- return np.frombuffer(out, np.float32).flatten()
diff --git a/spaces/ds520/bingo/src/lib/hooks/use-copy-to-clipboard.tsx b/spaces/ds520/bingo/src/lib/hooks/use-copy-to-clipboard.tsx
deleted file mode 100644
index 62f7156dca246c46b213151af003a3a177977ccf..0000000000000000000000000000000000000000
--- a/spaces/ds520/bingo/src/lib/hooks/use-copy-to-clipboard.tsx
+++ /dev/null
@@ -1,33 +0,0 @@
-'use client'
-
-import * as React from 'react'
-
-export interface useCopyToClipboardProps {
- timeout?: number
-}
-
-export function useCopyToClipboard({
- timeout = 2000
-}: useCopyToClipboardProps) {
- const [isCopied, setIsCopied] = React.useState(false)
-
- const copyToClipboard = (value: string) => {
- if (typeof window === 'undefined' || !navigator.clipboard?.writeText) {
- return
- }
-
- if (!value) {
- return
- }
-
- navigator.clipboard.writeText(value).then(() => {
- setIsCopied(true)
-
- setTimeout(() => {
- setIsCopied(false)
- }, timeout)
- })
- }
-
- return { isCopied, copyToClipboard }
-}
diff --git a/spaces/elkraken/Video-Object-Detection/utils/aws/resume.py b/spaces/elkraken/Video-Object-Detection/utils/aws/resume.py
deleted file mode 100644
index 338685b19c19ddb47aa2fde22a535a8efcf17802..0000000000000000000000000000000000000000
--- a/spaces/elkraken/Video-Object-Detection/utils/aws/resume.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Resume all interrupted trainings in yolor/ dir including DDP trainings
-# Usage: $ python utils/aws/resume.py
-
-import os
-import sys
-from pathlib import Path
-
-import torch
-import yaml
-
-sys.path.append('./') # to run '$ python *.py' files in subdirectories
-
-port = 0 # --master_port
-path = Path('').resolve()
-for last in path.rglob('*/**/last.pt'):
- ckpt = torch.load(last)
- if ckpt['optimizer'] is None:
- continue
-
- # Load opt.yaml
- with open(last.parent.parent / 'opt.yaml') as f:
- opt = yaml.load(f, Loader=yaml.SafeLoader)
-
- # Get device count
- d = opt['device'].split(',') # devices
- nd = len(d) # number of devices
- ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel
-
- if ddp: # multi-GPU
- port += 1
- cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}'
- else: # single-GPU
- cmd = f'python train.py --resume {last}'
-
- cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread
- print(cmd)
- os.system(cmd)
diff --git a/spaces/eson/tokenizer-arena/vocab/moss/convert_vocab_to_txt.py b/spaces/eson/tokenizer-arena/vocab/moss/convert_vocab_to_txt.py
deleted file mode 100644
index b82bc4807144c434184193efb5128be7d445248f..0000000000000000000000000000000000000000
--- a/spaces/eson/tokenizer-arena/vocab/moss/convert_vocab_to_txt.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import json
-from transformers import AutoTokenizer
-
-tokenizer = AutoTokenizer.from_pretrained("tokenizer", trust_remote_code=True)
-
-vocab = tokenizer.get_vocab()
-
-sorted_vocab = sorted(vocab.items(), key=lambda kv:kv[1])
-
-f_out = open("20B_tokenizer.txt", "w", encoding="utf-8")
-for token, idx in sorted_vocab:
- decoded_token = tokenizer.decode([idx])
- f_out.write(json.dumps(
- {"id": idx, "token": token, "token_decode": decoded_token}, ensure_ascii=False) + "\n")
- # f_out.write(json.dumps({"id": idx, "token": token, "token_decode": decoded_token}) + "\t" + token + "\t" + decoded_token + "\n")
-
diff --git a/spaces/evanpierce/3D_Photo_Inpainting2/boostmonodepth_utils.py b/spaces/evanpierce/3D_Photo_Inpainting2/boostmonodepth_utils.py
deleted file mode 100644
index 5f752b0caf9b8c9a64d9113e10d8b1fb2fa782b0..0000000000000000000000000000000000000000
--- a/spaces/evanpierce/3D_Photo_Inpainting2/boostmonodepth_utils.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import os
-import cv2
-import glob
-import numpy as np
-import imageio
-from MiDaS.MiDaS_utils import write_depth
-
-BOOST_BASE = 'BoostingMonocularDepth'
-
-BOOST_INPUTS = 'inputs'
-BOOST_OUTPUTS = 'outputs'
-
-def run_boostmonodepth(img_names, src_folder, depth_folder):
-
- if not isinstance(img_names, list):
- img_names = [img_names]
-
- # remove irrelevant files first
- clean_folder(os.path.join(BOOST_BASE, BOOST_INPUTS))
- clean_folder(os.path.join(BOOST_BASE, BOOST_OUTPUTS))
-
- tgt_names = []
- for img_name in img_names:
- base_name = os.path.basename(img_name)
- tgt_name = os.path.join(BOOST_BASE, BOOST_INPUTS, base_name)
- os.system(f'cp {img_name} {tgt_name}')
-
- # keep only the file name here.
- # they save all depth as .png file
- tgt_names.append(os.path.basename(tgt_name).replace('.jpg', '.png'))
-
- os.system(f'cd {BOOST_BASE} && python run.py --Final --data_dir {BOOST_INPUTS}/ --output_dir {BOOST_OUTPUTS} --depthNet 0')
-
- for i, (img_name, tgt_name) in enumerate(zip(img_names, tgt_names)):
- img = imageio.imread(img_name)
- H, W = img.shape[:2]
- scale = 640. / max(H, W)
-
- # resize and save depth
- target_height, target_width = int(round(H * scale)), int(round(W * scale))
- depth = imageio.imread(os.path.join(BOOST_BASE, BOOST_OUTPUTS, tgt_name))
- depth = np.array(depth).astype(np.float32)
- depth = resize_depth(depth, target_width, target_height)
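-        # the boosted depth comes back as 16-bit PNG values (0-65535); dividing by
-        # 32768 and subtracting 1 rescales them to roughly [-1, 1) before saving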
- np.save(os.path.join(depth_folder, tgt_name.replace('.png', '.npy')), depth / 32768. - 1.)
- write_depth(os.path.join(depth_folder, tgt_name.replace('.png', '')), depth)
-
-def clean_folder(folder, img_exts=['.png', '.jpg', '.npy']):
-
- for img_ext in img_exts:
- paths_to_check = os.path.join(folder, f'*{img_ext}')
- if len(glob.glob(paths_to_check)) == 0:
- continue
- print(paths_to_check)
- os.system(f'rm {paths_to_check}')
-
-def resize_depth(depth, width, height):
- """Resize numpy (or image read by imageio) depth map
-
- Args:
- depth (numpy): depth
- width (int): image width
- height (int): image height
-
- Returns:
- array: processed depth
- """
- depth = cv2.blur(depth, (3, 3))
- return cv2.resize(depth, (width, height), interpolation=cv2.INTER_AREA)
diff --git a/spaces/fatiXbelha/sd/Bus Simulator Indonesia Mod APK Download The Best Simulation Game for Bus Lovers.md b/spaces/fatiXbelha/sd/Bus Simulator Indonesia Mod APK Download The Best Simulation Game for Bus Lovers.md
deleted file mode 100644
index 073839b77b1a66340172029ec2085ec1b75685e7..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Bus Simulator Indonesia Mod APK Download The Best Simulation Game for Bus Lovers.md
+++ /dev/null
@@ -1,133 +0,0 @@
-
-
How to Download APK Mod Simulator Bus Indonesia
-
If you are a fan of bus driving games, you might have heard of Simulator Bus Indonesia, a popular mobile game that lets you experience what it is like to be a bus driver in Indonesia. But did you know that you can also download an APK mod version of the game that gives you more features and options? In this article, we will tell you what Simulator Bus Indonesia is, what APK mod is, and how to download and install the APK mod version of the game on your Android device.
Simulator Bus Indonesia, or BUSSID, is a mobile game developed by Maleo. As the name suggests, this bus driving game lets you get behind the wheels of a bus and drive through various cities in Indonesia. It comes with 3D graphics and offers two modes, letting you choose your preferred gameplay option to ensure that you are comfortable as you play.
-
Features of the game
-
Some of the top features of Simulator Bus Indonesia are:
-
-
You can design your own livery for your bus, or choose from the ones available in the game.
-
You can control your bus with easy and intuitive controls, such as steering wheel, buttons, or tilt.
-
You can explore authentic Indonesian cities and places, such as Jakarta, Surabaya, Bali, and more.
-
You can drive different types of Indonesian buses, such as PO Harapan Jaya, Sinar Jaya, Gunung Harta, and more.
-
You can honk your horn with cool and fun sounds, such as "Om Telolet Om!", a popular phrase among Indonesian bus enthusiasts.
-
You can save your data online and use your own 3D model using the vehicle mod system.
-
You can join online multiplayer convoys with other players and chat with them.
-
-
How to play the game
-
To play Simulator Bus Indonesia, you need to download and install the game from Google Play Store or other sources. Then, you need to choose a mode: career or free. In career mode, you need to complete missions and earn money by driving passengers from one place to another. In free mode, you can drive anywhere you want without any restrictions. You can also customize your bus and settings from the menu. To drive your bus, you need to use the controls on the screen, such as gas pedal, brake pedal, steering wheel, horn, lights, wipers, etc. You also need to follow the traffic rules and avoid accidents. You can check your map and GPS for directions and destinations.
-
What is APK Mod?
-
APK Mod is a modified version of an original APK file that has been altered by someone to add or remove some features from the original app. APK Mod files are usually not available on official app stores, but on third-party websites or platforms.
-
download bus simulator indonesia mod apk unlimited money
-download bus simulator indonesia mod apk latest version
-download bus simulator indonesia mod apk terbaru
-download bus simulator indonesia mod apk 2023
-download bus simulator indonesia mod apk offline
-download bus simulator indonesia mod apk free shopping
-download bus simulator indonesia mod apk unlimited fuel
-download bus simulator indonesia mod apk hack
-download bus simulator indonesia mod apk android 1
-download bus simulator indonesia mod apk revdl
-download bus simulator indonesia mod apk rexdl
-download bus simulator indonesia mod apk happymod
-download bus simulator indonesia mod apk no ads
-download bus simulator indonesia mod apk obb
-download bus simulator indonesia mod apk data
-download bus simulator indonesia mod apk full unlocked
-download bus simulator indonesia mod apk all skin
-download bus simulator indonesia mod apk all livery
-download bus simulator indonesia mod apk all map
-download bus simulator indonesia mod apk all vehicle
-download bus simulator indonesia mod apk anti banned
-download bus simulator indonesia mod apk cheat menu
-download bus simulator indonesia mod apk cheat engine
-download bus simulator indonesia mod apk custom horn
-download bus simulator indonesia mod apk custom music
-download bus simulator indonesia mod apk custom sticker
-download bus simulator indonesia mod apk custom traffic
-download bus simulator indonesia mod apk custom weather
-download bus simulator indonesia mod apk easy win
-download bus simulator indonesia mod apk endless money
-download bus simulator indonesia mod apk free fire skin
-download bus simulator indonesia mod apk free horn sound
-download bus simulator indonesia mod apk free livery bussid shd hd xhd arjuna xhd jetbus hd 3+
-download bus simulator indonesia mod apk free music player
-download bus simulator indonesia mod apk free sticker bussid shd hd xhd arjuna xhd jetbus hd 3+
-download bus simulator indonesia mod apk free traffic bussid shd hd xhd arjuna xhd jetbus hd 3+
-download bus simulator indonesia mod apk free weather bussid shd hd xhd arjuna xhd jetbus hd 3+
-download bus simulator indonesia mod apk full feature
-download bus simulator indonesia mod apk full graphic
-download bus simulator indonesia mod apk full map bussid shd hd xhd arjuna xhd jetbus hd 3+
-download bus simulator indonesia mod apk full skin bussid shd hd xhd arjuna xhd jetbus hd 3+
-download bus simulator indonesia mod apk full vehicle bussid shd hd xhd arjuna xhd jetbus hd 3+
-download bus simulator indonesia mod apk game guardian
-download bus simulator indonesia mod apk high graphic
-download bus simulator indonesia mod apk idbs studio
-download bus simulator indonesia mod apk maleo studio
-download bus simulator indonesia mod apk mega link
-download bus simulator indonesia mod apk no root
-download bus simulator indonesia mod apk pro version
-
Benefits of using APK Mod
-
Some of the benefits of using APK Mod are:
-
-
You can access premium features or content that are otherwise locked or paid in the original app.
-
You can bypass some restrictions or limitations that are imposed by the original app developer or publisher.
-
You can enhance your gaming experience with more options and possibilities that are not available in the original app.
-
-
Risks of using APK Mod
-
Some of the risks of using APK Mod are:
-
-
You may violate the terms and conditions of the original app developer or publisher, which may result in legal actions or bans.
-
You may expose your device to malware or viruses that may harm your data or system.
-
You may lose your progress or data if the modded app is not compatible with the original app version or server.
-
You may face ethical or moral issues if the modded app infringes the rights or interests of the original app developer or publisher.
-
-
How to download APK Mod Simulator Bus Indonesia
-
If you want to download and install the APK mod version of Simulator Bus Indonesia, you need to follow these steps:
-
Steps to download and install the mod
-
-
Find a reliable and trustworthy website or platform that offers the APK mod file of Simulator Bus Indonesia. You can search online or ask for recommendations from other players. Some of the popular websites that provide APK mod files are APKPure, APKMody, Rexdl, etc.
-
Download the APK mod file from the website or platform. Make sure you have enough storage space on your device and a stable internet connection. You may also need to enable the option of "Unknown sources" in your device settings to allow the installation of apps from sources other than Google Play Store.
-
Locate the downloaded APK mod file on your device and tap on it to start the installation process. Follow the instructions on the screen and grant the necessary permissions to the app.
-
Wait for the installation to complete and then launch the app from your device menu. You may need to verify your account or log in with your credentials if required.
-
Enjoy the modded version of Simulator Bus Indonesia with more features and options.
-
-
Tips to enjoy the modded game
-
Some of the tips to enjoy the modded version of Simulator Bus Indonesia are:
-
-
Make sure you have a compatible device and operating system that can run the modded app smoothly and without errors.
-
Check for updates or patches regularly to ensure that your modded app is up to date and working properly.
-
Be careful when using online multiplayer features or connecting with other players, as you may encounter hackers or cheaters who may ruin your gaming experience.
-
Do not use the modded app for illegal or unethical purposes, such as hacking, cheating, or stealing data from other players or servers.
-
Respect the original app developer and publisher and support them if you like their work. You can also buy the original app from Google Play Store or other sources if you want to support them financially.
-
-
Conclusion
-
In conclusion, Simulator Bus Indonesia is a fun and realistic bus driving game that lets you explore various Indonesian cities and places. You can also download an APK mod version of the game that gives you more features and options, such as unlimited money, unlocked buses, custom liveries, etc. However, you need to be careful when downloading and installing the APK mod file, as it may come with some risks and challenges. You also need to follow some tips to enjoy the modded game safely and ethically. We hope this article has helped you learn how to download APK mod Simulator Bus Indonesia and have a great time playing it.
-
FAQs
-
Here are some frequently asked questions about Simulator Bus Indonesia and its APK mod version:
-
Q: Is Simulator Bus Indonesia free?
-
A: Yes, Simulator Bus Indonesia is free to download and play from Google Play Store or other sources. However, it may contain some in-app purchases or ads that require real money.
-
Q: Is APK Mod Simulator Bus Indonesia safe?
-
A: It depends on where you download it from and how you use it. Some websites or platforms may provide safe and secure APK mod files, while others may contain malware or viruses that may harm your device or data. You also need to be careful when using online multiplayer features or connecting with other players, as you may encounter hackers or cheaters who may ruin your gaming experience. You also need to respect the original app developer and publisher and not use the modded app for illegal or unethical purposes.
-
Q: How can I update APK Mod Simulator Bus Indonesia?
-
A: You can update APK Mod Simulator Bus Indonesia by downloading and installing the latest version of the APK mod file from the same website or platform where you got it from. You may also need to uninstall the previous version of the app before installing the new one. However, some updates or patches may not be compatible with your modded app, so you need to check before updating.
-
Q: How can I uninstall APK Mod Simulator Bus Indonesia?
-
A: You can uninstall APK Mod Simulator Bus Indonesia by following these steps:
-
-
Go to your device settings and find the option of "Apps" or "Applications".
-
Find and tap on "Simulator Bus Indonesia" or "Simulator Bus Indonesia Mod" from the list of apps.
-
Tap on the option of "Uninstall" and confirm your action.
-
Wait for the uninstallation to complete and then check your device menu to make sure the app is gone.
-
-
Q: Where can I get more information or help about Simulator Bus Indonesia or its APK mod version?
-
A: You can get more information or help about Simulator Bus Indonesia or its APK mod version from these sources:
-
-
The official website of the game developer, Maleo, at [https://maleo.id/].
-
The official Facebook page of the game, at [https://www.facebook.com/BUSSIDGAME/].
-
The official Instagram account of the game, at [https://www.instagram.com/bussimulatorid/].
-
The official YouTube channel of the game, at [https://www.youtube.com/channel/UCdpdj7ZGO8i3cLyt_0MpkAg].
-
The online forums or communities of the game, such as [https://bussidgame.com/] or [https://www.reddit.com/r/BUSSID/].
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Disney Heroes Battle Mode - Collect Upgrade and Battle with 100 Disney and Pixar Heroes.md b/spaces/fatiXbelha/sd/Disney Heroes Battle Mode - Collect Upgrade and Battle with 100 Disney and Pixar Heroes.md
deleted file mode 100644
index 32301d1c274c8d3119bc7b24921e914a751c0804..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Disney Heroes Battle Mode - Collect Upgrade and Battle with 100 Disney and Pixar Heroes.md
+++ /dev/null
@@ -1,73 +0,0 @@
-
-
Disney Heroes Battle Mode Download: How to Play This Epic RPG Game on Your Device
| | H2: Introduction |
Introduction
Do you love Disney and Pixar movies? Do you enjoy playing strategy RPG games? If you answered yes to both questions, then you will love Disney Heroes Battle Mode. This is a game that lets you collect and battle with over 100 Disney and Pixar heroes from your favorite films. You can team up with other players for missions and campaigns, upgrade your characters with abilities and gear, and explore a new digital world that is under attack by a virus.
But how can you download and play this game on your device? In this article, we will show you how to do that in a few simple steps. We will also give you some tips and tricks on how to get started with this game and have fun with your favorite Disney and Pixar heroes.
| | H2: What is Disney Heroes Battle Mode? |
What is Disney Heroes Battle Mode?
Disney Heroes Battle Mode is a free-to-play action-packed strategy RPG game that was released in 2018 by PerBlue Entertainment. The game features characters from various Disney and Pixar films, such as The Incredibles, Wreck-It Ralph, Toy Story, Zootopia, Monsters Inc., The Nightmare Before Christmas, The Lion King, Pirates of the Caribbean, Frozen, Moana, Big Hero 6, Wall-E, Aladdin, Beauty and the Beast, Alice in Wonderland, Mulan, Hercules, Stitch, Mickey Mouse, Timon & Pumbaa, Captain Jack Sparrow, Merlin, Maleficent, Scar, Elsa, Buzz Lightyear, and many more.
The game's story revolves around Wreck-It Ralph and Vanellope von Schweetz who find a beta key to unlock a mysterious new RPG game inside a massive digital city. However, they soon discover that the city is corrupted by an evil virus that turns everything into enemies. They also meet other heroes who are pulled into the city and need their help to fight back against the virus.
The game allows you to create your own team of heroes from different films and genres. You can customize their skills and equipment to suit your playstyle and strategy. You can also join forces with other players for cooperative missions and special campaigns. You can also compete with other players in the arena and climb the leaderboards.
How to Download Disney Heroes Battle Mode on Your Device
For Android Devices
If you have an Android device (such as a smartphone or tablet), you can download Disney Heroes Battle Mode from Google Play Store. Here are the steps to follow:
Open Google Play Store on your device
Search for Disney Heroes Battle Mode in the search bar
-
Tap on the game icon and then tap on Install
-
Wait for the game to download and install on your device
-
Open the game and enjoy playing with your favorite Disney and Pixar heroes
-
Note: The game requires Android 5.0 or higher and at least 1 GB of RAM to run smoothly. The game also requires an internet connection to play.
-
disney heroes battle mode pc game
-how to play disney heroes battle mode on mac
-disney heroes battle mode apk mod
-disney heroes battle mode ios app
-disney heroes battle mode best characters
-disney heroes battle mode tips and tricks
-disney heroes battle mode hack no survey
-disney heroes battle mode latest update
-disney heroes battle mode online free
-disney heroes battle mode cheats codes
-disney heroes battle mode tier list 2023
-disney heroes battle mode guild war rewards
-disney heroes battle mode reddit community
-disney heroes battle mode support email
-disney heroes battle mode wiki fandom
-disney heroes battle mode frozen team
-disney heroes battle mode incredibles team
-disney heroes battle mode toy story team
-disney heroes battle mode zootopia team
-disney heroes battle mode wreck it ralph team
-disney heroes battle mode alice in wonderland team
-disney heroes battle mode pirates of the caribbean team
-disney heroes battle mode beauty and the beast team
-disney heroes battle mode lion king team
-disney heroes battle mode nightmare before christmas team
-disney heroes battle mode tron legacy team
-disney heroes battle mode aladdin team
-disney heroes battle mode mulan team
-disney heroes battle mode moana team
-disney heroes battle mode big hero 6 team
-disney heroes battle mode wall-e team
-disney heroes battle mode robin hood team
-disney heroes battle mode hercules team
-disney heroes battle mode merlin team
-disney heroes battle mode mickey mouse team
-disney heroes battle mode stitch team
-disney heroes battle mode buzz lightyear team
-disney heroes battle mode elsa team
-disney heroes battle mode scar team
-disney heroes battle mode maleficent team
-disney heroes battle mode jack sparrow team
-disney heroes battle mode woody team
-disney heroes battle mode timon and pumbaa team
-disney heroes battle mode baymax team
-disney heroes battle mode eve team
-disney heroes battle mode mr incredible team
-disney heroes battle mode elastigirl team
-disney heroes battle mode chief bogo team
For iOS Devices
If you have an iOS device (such as an iPhone or iPad), you can download Disney Heroes Battle Mode from App Store. Here are the steps to follow:
Open App Store on your device
-
Search for Disney Heroes Battle Mode in the search bar
-
Tap on the game icon and then tap on Get
-
Wait for the game to download and install on your device
-
Open the game and enjoy playing with your favorite Disney and Pixar heroes
-
Note: The game requires iOS 9.0 or later and at least 1 GB of RAM to run smoothly. The game also requires an internet connection to play.
How to Choose the Best Characters for Your Team
Now that you know how to download and play Disney Heroes Battle Mode on your device, you might be wondering how to choose the best characters for your team. After all, there are over 100 heroes to collect and unlock, each with their own skills, roles, and synergies. How can you decide which ones to use and which ones to bench?
Well, there are a few factors to consider when choosing your team, such as:
The roles of the characters: As we mentioned before, there are four main roles in the game: Tank, Damage, Support, and Control. Each role has a different function and contribution to the team. Tanks are the frontline heroes who can absorb damage and protect their allies. Damage heroes are the ones who deal the most damage and eliminate enemies. Support heroes are the ones who heal, buff, or shield their allies. Control heroes are the ones who debuff, stun, or manipulate enemies. Ideally, you want to have a balanced team that covers all these roles.
-
The tiers of the characters: Not all characters are created equal in Disney Heroes Battle Mode. Some are more powerful, versatile, or useful than others. That's why there are tier lists that rank the characters from best to worst based on their performance and popularity in the game. You can find many community-made tier lists online. Of course, tier lists are not absolute and may vary depending on your preferences and playstyle. But they can give you a general idea of which characters are worth investing in and which ones are not.
-
The synergies of the characters: Another important factor to consider when choosing your team is how well the characters work together. Some characters have skills that complement or enhance each other, creating powerful combos and effects. For example, Elsa and Olaf have skills that freeze enemies, making them vulnerable to extra damage from other frozen heroes. Or Stitch and Hiro have skills that sap enemies, reducing their energy gain and making them easier to control. You can find out which characters have synergies by checking their friendship disks or their collection buffs.
-
By taking into account these factors, you can create a strong and effective team that can handle any challenge in Disney Heroes Battle Mode.
How to Play Disney Heroes Battle Mode Like a Pro
Now that you have downloaded the game and chosen your team, you might be wondering how to play Disney Heroes Battle Mode like a pro. Well, there are some tips and tricks that can help you improve your skills and strategies in this game. Here are some of them:
Learn the basics of combat: The combat in Disney Heroes Battle Mode is mostly automatic, meaning that your heroes will attack and use their skills on their own. However, you can still influence the outcome of the battle by using your heroes' white skills manually. These are the skills that have a blue bar under them, and they can be activated by tapping on the hero's portrait. White skills are usually the most powerful and impactful skills in the game, so you should use them wisely and at the right time. For example, you can use them to finish off a low-health enemy, to counter an enemy's skill, or to turn the tide of the battle in your favor.
-
Upgrade your heroes regularly: As we mentioned before, leveling up your heroes is one of the best ways to increase their power and stats. However, there are other ways to upgrade your heroes as well, such as enhancing their skills, equipping them with badges, promoting their rarity, unlocking their friendship disks, and increasing their mods. All of these methods require different resources and materials that you can obtain by playing the game and completing various tasks. You should always try to upgrade your heroes as much as possible, as this will make them stronger and more versatile in combat.
-
Explore the different game modes: Disney Heroes Battle Mode has a lot of content and features to offer, besides the main campaign mode. There are also other game modes that you can play and enjoy, such as the arena, the coliseum, the city watch, the surge, the heist, the trials, the port, the invasion, and more. Each game mode has its own rules, rewards, and challenges that will test your skills and strategies. You should try to play all of them regularly, as they will help you earn more resources, materials, and rewards that you can use to upgrade your heroes and progress faster in the game.
-
Join a guild and make friends: Disney Heroes Battle Mode is not only a solo game, but also a social game. You can join a guild and make friends with other players who share your passion for Disney and Pixar films. By joining a guild, you can access some exclusive features and benefits, such as guild chat, guild perks, guild wars, guild contests, and more. You can also cooperate with your guildmates for some game modes, such as the surge or the invasion. Moreover, you can make friends with other players by sending them friend requests or messages. You can also unlock friendship campaigns with some heroes by befriending them or their partners.
-
By following these tips and tricks, you can play Disney Heroes Battle Mode like a pro and have more fun with this epic RPG game.
Conclusion
Disney Heroes Battle Mode is a game that every Disney and Pixar fan should try. It is a game that combines the best of both worlds: the charm and nostalgia of your favorite films, and the thrill and strategy of RPG games. You can download and play this game on your Android or iOS device for free, and enjoy collecting and battling with over 100 heroes from different films and genres. You can also upgrade your heroes, explore different game modes, join a guild, and make friends with other players. Disney Heroes Battle Mode is a game that will keep you entertained and engaged for hours.
So what are you waiting for? Download Disney Heroes Battle Mode today and join the epic adventure with your favorite Disney and Pixar heroes!
FAQs
Here are some frequently asked questions about Disney Heroes Battle Mode:
How can I get more heroes in Disney Heroes Battle Mode? You can get more heroes in Disney Heroes Battle Mode by opening crates, completing missions, participating in events, or buying them with in-game currency. You can also unlock some heroes by completing their friendship campaigns.
-
How can I get more resources and materials in Disney Heroes Battle Mode? You can get more resources and materials in Disney Heroes Battle Mode by playing the game regularly, completing various tasks, winning battles, opening rewards, or buying them with real money.
-
How can I contact the support team of Disney Heroes Battle Mode? You can contact the support team of Disney Heroes Battle Mode by tapping on the settings icon on the main screen, then tapping on support, and then tapping on contact us. You can also visit their official website or forum for more information.
-
Is Disney Heroes Battle Mode safe for kids? Disney Heroes Battle Mode is rated 9+ on App Store and Everyone 10+ on Google Play Store. The game contains cartoon violence, mild language, and in-app purchases. Parents should supervise their kids when playing this game and set parental controls if needed.
-
Can I play Disney Heroes Battle Mode offline? No, you cannot play Disney Heroes Battle Mode offline. The game requires an internet connection to play.
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download 60 Second Atomic Adventure APK OBB Gratis A Survival Game with a Twist.md b/spaces/fatiXbelha/sd/Download 60 Second Atomic Adventure APK OBB Gratis A Survival Game with a Twist.md
deleted file mode 100644
index 332b4ed2ec17142eea340378ad23241de36cd045..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download 60 Second Atomic Adventure APK OBB Gratis A Survival Game with a Twist.md
+++ /dev/null
@@ -1,96 +0,0 @@
-
-
Download 60 Second Atomic Adventure APK OBB Gratis: A Guide
-
If you are looking for a fun and challenging game that will test your survival skills in a post-apocalyptic scenario, then you might want to try 60 Second Atomic Adventure. This is a game that will make you think fast, act smart, and deal with the consequences of your choices. But what if you don't want to pay for the game or deal with annoying ads and in-app purchases? Is there a way to download 60 Second Atomic Adventure APK OBB gratis? In this article, we will answer these questions and more. We will tell you what 60 Second Atomic Adventure is, why you might want to download it for free, and how to do it safely and easily.
-
download 60 second atomic adventure apk obb gratis
60 Second Atomic Adventure is a dark comedy adventure game developed by Robot Gentleman. It was released in 2015 for Windows and Mac, and later ported to iOS and Android devices. The game is set in a fictional 1950s America, where a nuclear war is about to break out. You play as Ted, a family man who has only 60 seconds to gather his family and some essential items before rushing to the fallout shelter in his backyard. Once there, you have to survive as long as possible with limited resources and random events. The game has multiple endings depending on your actions and choices.
-
A brief introduction to the game and its features
-
60 Second Atomic Adventure is a game that combines elements of strategy, simulation, and roguelike genres. It has four main features:
-
-
A unique art style that blends pixel art with hand-drawn illustrations
-
A dynamic soundtrack that adapts to the situation and mood of the game
-
A humorous and satirical tone that mocks the Cold War era and its propaganda
-
A replayable gameplay that offers different scenarios and outcomes every time
-
-
How to play 60 Second Atomic Adventure
-
The game has four different modes that you can choose from:
-
The main mode: Atomic Drill
-
This is the default mode of the game, where you have to complete two phases: scavenge and survive. In the scavenge phase, you have 60 seconds to grab as many items as you can from your house before heading to the shelter. You have to decide what items are essential for your survival, such as food, water, medicine, weapons, tools, etc. You also have to choose which family members you want to take with you: your wife Dolores, your son Timmy, your daughter Mary Jane, or your pet (a dog or a cat). You can also find some special items that can unlock new events or endings in the game.
-
60 second atomic adventure apk obb free download
-download 60 seconds! atomic adventure full apk + obb
-60 second atomic adventure mod apk obb download gratis
-how to download 60 second atomic adventure apk obb for free
-60 second atomic adventure apk + obb latest version download
-download 60 second atomic adventure android apk obb gratis
-60 second atomic adventure apk obb offline download free
-60 second atomic adventure apk obb unlimited money download
-download 60 second atomic adventure apk + obb data gratis
-60 second atomic adventure apk obb highly compressed download
-60 second atomic adventure premium apk obb free download
-download 60 seconds! atomic adventure 1.3.107 apk full + obb
-60 second atomic adventure cracked apk obb download gratis
-where to download 60 second atomic adventure apk obb for free
-60 second atomic adventure apk + obb file download free
-download 60 second atomic adventure modded apk obb gratis
-60 second atomic adventure hack apk obb download free
-download 60 seconds! atomic adventure android game apk + obb
-60 second atomic adventure full unlocked apk obb download
-download 60 second atomic adventure survival game apk obb gratis
-60 second atomic adventure pro apk obb free download
-download 60 seconds! atomic adventure hd apk + obb data
-60 second atomic adventure cheat apk obb download gratis
-how to install 60 second atomic adventure apk obb for free
-60 second atomic adventure apk + obb mega download free
-download 60 second atomic adventure paid apk obb gratis
-60 second atomic adventure patch apk obb download free
-download 60 seconds! atomic adventure mod apk + obb unlimited everything
-60 second atomic adventure unlocked apk obb download gratis
-how to play 60 second atomic adventure apk obb for free
-60 second atomic adventure apk + obb mediafire download free
-download 60 second atomic adventure original apk obb gratis
-60 second atomic adventure update apk obb download free
-download 60 seconds! atomic adventure pc game apk + obb emulator
-60 second atomic adventure no ads apk obb download gratis
-how to fix 60 second atomic adventure apk obb for free
-60 second atomic adventure apk + obb google drive download free
-download 60 second atomic adventure new version apk obb gratis
-60 second atomic adventure tips and tricks apk obb download free
-download 60 seconds! atomic adventure gameplay video apk + obb youtube
-
In the survive phase, you have to manage your resources and make decisions that will affect your fate. You have to ration your food and water, deal with injuries and illnesses, and fend off raiders. One of the benefits of downloading the game is that you can play it on most Android devices. The game does not have high system requirements or specifications. You can run it smoothly and easily on your phone or tablet.
-
The risks of downloading the game for free
-
However, downloading 60 Second Atomic Adventure APK OBB gratis also comes with some risks that you should be aware of, such as:
-
Potential malware, viruses, or spyware
-
One of the dangers of downloading the game for free is that you might expose your device to malware, viruses, or spyware. These are malicious programs that can harm your device, steal your data, or compromise your security. You might download a corrupted or infected file that can damage your device or cause it to malfunction.
-
Legal issues or copyright infringement
-
Another risk of downloading the game for free is that you might violate the law or the rights of the developers. 60 Second Atomic Adventure is a copyrighted game that belongs to Robot Gentleman. By downloading it for free, you are breaking the terms and conditions of the game and the Google Play Store. You might face legal consequences or penalties for piracy or theft.
-
Poor performance or compatibility issues
-
A final risk of downloading the game for free is that you might experience poor performance or compatibility issues. The game might not work properly or at all on your device. You might encounter bugs, glitches, crashes, or errors. The game might not be compatible with your device model, operating system, or software version.
-
How to download 60 Second Atomic Adventure APK OBB gratis?
-
If you still want to download 60 Second Atomic Adventure APK OBB gratis, despite the risks, you need to follow some steps to do it safely and easily. Here are the steps you need to take:
-
The steps to download and install the game for free
-
To download and install 60 Second Atomic Adventure APK OBB gratis, you need to do the following:
-
Find a reliable source for the APK OBB files
-
The first step is to find a reliable source for the APK OBB files of the game. These are the files that contain the game data and installation package. You need to find a website that offers these files for free and without any viruses or malware. You can search online for reviews, ratings, or feedback from other users who have downloaded the files before. You can also use a trusted antivirus program to scan the files before downloading them.
-
Enable unknown sources on your device settings
-
The second step is to enable unknown sources on your device settings. This is a security feature that prevents you from installing apps from sources other than the Google Play Store. You need to disable this feature temporarily to install the APK file of the game. To do this, go to your device settings, then security, then unknown sources, and toggle it on.
-
Download and install the APK file first, then the OBB file second
-
The third step is to download and install the APK file first, then the OBB file second. The APK file is the installation package of the game, while the OBB file is the data file of the game. You need to download both files and save them in your device storage. Then, you need to install the APK file by tapping on it and following the instructions. After that, you need to copy and paste the OBB file into the Android/OBB folder in your device storage.
-
Launch the game and enjoy
-
The final step is to launch the game and enjoy. You can find the game icon on your home screen or app drawer. Tap on it and start playing 60 Second Atomic Adventure for free.
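If you prefer to sideload from a computer, the same two-step flow described above (install the APK first, then place the OBB data file under Android/obb) can be scripted with adb. The sketch below is only an illustration: the file names and package id are placeholders, and it assumes adb is installed and USB debugging is enabled on your device.

```python
# Minimal sketch: sideload an APK and push its OBB data file with adb.
# File names and the package id are placeholders, not the real ones.
import subprocess
from pathlib import Path

APK = Path("60-seconds-atomic-adventure.apk")        # placeholder file name
OBB = Path("main.1.com.example.sixtyseconds.obb")    # placeholder file name
PACKAGE = "com.example.sixtyseconds"                 # placeholder package id


def sideload(apk: Path, obb: Path, package: str) -> None:
    # Step 1: install the APK.
    subprocess.run(["adb", "install", str(apk)], check=True)
    # Step 2: copy the OBB file into the shared Android/obb/<package>/ folder.
    dest = f"/sdcard/Android/obb/{package}/"
    subprocess.run(["adb", "shell", "mkdir", "-p", dest], check=True)
    subprocess.run(["adb", "push", str(obb), dest], check=True)


if __name__ == "__main__":
    sideload(APK, OBB, PACKAGE)
```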
-
Conclusion
-
60 Second Atomic Adventure is a fun and challenging game that will test your survival skills in a post-apocalyptic scenario. You can download it for free by following some steps, but you should also be aware of some risks. If you want to play 60 Second Atomic Adventure APK OBB gratis, make sure you do it safely and legally.
-
Frequently Asked Questions
-
-
What are the minimum requirements to play 60 Second Atomic Adventure on Android?
-
The minimum requirements to play 60 Second Atomic Adventure on Android are: Android 4.1 or higher, 1 GB of RAM, and 200 MB of free space.
-
What are some tips and tricks to play 60 Second Atomic Adventure?
-
Some tips and tricks to play 60 Second Atomic Adventure are: plan ahead before scavenging, prioritize food and water over other items, use items wisely and sparingly, explore the wasteland carefully and cautiously, and try different endings and modes.
-
What are some alternatives to 60 Second Atomic Adventure?
-
Some alternatives to 60 Second Atomic Adventure are: Fallout Shelter, This War of Mine, Sheltered, and 60 Parsecs.
-
Is 60 Second Atomic Adventure available for other platforms?
-
Yes, 60 Second Atomic Adventure is also available for Windows, Mac, iOS, and Nintendo Switch.
-
Is 60 Second Atomic Adventure a multiplayer game?
-
No, 60 Second Atomic Adventure is a single-player game. However, you can share your screenshots and stories with other players online.
-
197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fatiXbelha/sd/Download Love by Ali Jita - A Song Produced by the Intelligent Beat Maker Don Adah.md b/spaces/fatiXbelha/sd/Download Love by Ali Jita - A Song Produced by the Intelligent Beat Maker Don Adah.md
deleted file mode 100644
index 2a9ab293d777270be506641e92ed92d42336f03d..0000000000000000000000000000000000000000
--- a/spaces/fatiXbelha/sd/Download Love by Ali Jita - A Song Produced by the Intelligent Beat Maker Don Adah.md
+++ /dev/null
@@ -1,117 +0,0 @@
-
-
How to Download Love by Ali Jita
-
If you are a fan of Hausa music, you might have heard of love by ali jita, a romantic song by one of the most popular Hausa musicians in Nigeria. The song was released in 2018 and has been ranked by BBC Hausa as the second-best Hausa music of that year. The song has also been used in several Kannywood movies, making it even more popular among Hausa speakers and lovers.
-
Downloading music online is a convenient and affordable way to enjoy your favorite songs anytime and anywhere. You can also access a wide variety of music genres and artists from different sources and platforms. However, downloading music online also comes with some challenges, such as finding reliable and legal websites, avoiding malware and viruses, and respecting the rights of the artists and creators.
In this article, we will show you how to download love by ali jita from three different sources: iTunes, YouTube, and other websites. We will also provide you with some tips and warnings for downloading music online safely and legally. Follow these simple steps and enjoy listening to love by ali jita on your device.
-
How to Download Love by Ali Jita from iTunes
-
iTunes is one of the most popular and trusted platforms for buying and downloading music online. You can use iTunes on your computer or smartphone to access millions of songs from various artists and genres. Here is how you can download love by ali jita from iTunes:
-
-
Install iTunes on your computer or smartphone. If you are using a Windows computer, you will need to download and install iTunes from Apple's website. If you are using a Mac computer or an iPhone, iTunes will be installed by default.
-
Sign in with your Apple ID and enter your payment information. If you don't have an Apple ID, you will need to create one here. You will also need to enter your credit card or PayPal details to be able to buy music from iTunes.
-
Search for love by ali jita on the iTunes store and click the price to buy it. You can use the search bar at the top-right corner of iTunes to type in the name of the song, album, or artist. You can also browse through different categories and genres of music available on iTunes. Once you find love by ali jita, you will see the price of the song next to it. Click on the price to buy the song. You can also preview the song by clicking on the play button before buying it.
-
Enter your Apple ID password or use Touch ID to confirm your purchase. You will need to enter your Apple ID password or use your fingerprint to verify your identity and authorize the payment. You will also receive a confirmation email from iTunes with the details of your purchase.
-
View the music file on your device or transfer it to another location or service. After buying the song, you can view it on your device's music library or iTunes library. You can also sync it with other devices or services, such as iCloud, Spotify, or Google Play Music.
-
-
Congratulations, you have successfully downloaded love by ali jita from iTunes. Enjoy listening to the song and share it with your friends and family.
-
How to Download Love by Ali Jita from YouTube
-
YouTube is another popular and convenient platform for watching and listening to music online. You can find almost any song or video on YouTube, including love by ali jita official video. However, YouTube does not allow you to download music directly from its website or app. You will need to use a third-party website or app that allows you to convert YouTube videos into MP3 files and download them to your device. Here is how you can download love by ali jita from YouTube:
-
-
Visit YouTube and search for love by ali jita official video. You can use the search bar at the top of YouTube to type in the name of the song, album, or artist. You can also browse through different categories and genres of music available on YouTube.
-
Copy the URL of the video from the address bar or the share button. Once you find love by ali jita official video, you will see the URL of the video on the address bar of your browser or on the share button below the video. Copy the URL by right-clicking on it and selecting copy or by tapping and holding on it and selecting copy.
-
Visit a third-party website or app that allows you to download YouTube videos as MP3 files, such as ytmp3.cc or vidmate. There are many websites and apps that offer this service for free or for a fee. Some of them are ytmp3.cc, vidmate, y2mate, savefrom.net, and 4kdownload. You can use any of them as long as they are reliable and legal.
-
Paste the URL of the video into the search box and click convert or download. Once you visit the website or app of your choice, you will see a search box where you can paste the URL of the video that you copied from YouTube. Paste the URL by right-clicking on it and selecting paste or by tapping and holding on it and selecting paste. Then, click on the convert or download button to start the process.
-
Save the MP3 file to your device or transfer it to another location or service. After converting or downloading the video, you will see a link or a button to save the MP3 file to your device. Click on it and choose a location where you want to save it. You can also transfer it to another device or service, such as Dropbox, Google Drive, or SoundCloud.
-
-
Congratulations, you have successfully downloaded love by ali jita from YouTube. Enjoy listening to the song and share it with your friends and family.
-
download love by ali jita mp3
-download love by ali jita video
-download love by ali jita lyrics
-download love by ali jita cover
-download love by ali jita remix
-download love by ali jita audio
-download love by ali jita song
-download love by ali jita album
-download love by ali jita shazam
-download love by ali jita youtube
-download love by ali jita instrumental
-download love by ali jita ringtone
-download love by ali jita karaoke
-download love by ali jita music
-download love by ali jita hausa song
-download love by ali jita free mp3
-download love by ali jita official video
-download love by ali jita 320kbps
-download love by ali jita online
-download love by ali jita latest song
-download love by ali jita ft hamisu breaker
-download love by ali jita live performance
-download love by ali jita dj mix
-download love by ali jita whatsapp status
-download love by ali jita mp4 video
-download love by ali jita produced by drimzbeat
-download love by ali jita full song
-download love by ali jita single
-download love by ali jita arewa angel
-download love by ali jita mai waka
-download love by ali jita mata
-download love by ali jita asha ruwa remix
-download love by ali jita aure yana da dadi
-download love by ali jita tambura
-download love by ali jita super star
-download love by ali jita halimatu sadiya
-download love by ali jita gimbiya aisha
-download love by ali jita call on me
-download love by ali jita new song 2023
-download love by ali jita best of 2023 album
-
How to Download Love by Ali Jita from Other Sources
-
If you are looking for other sources to download love by ali jita, you can also visit other websites that offer free or paid music downloads. These websites may have a larger or more diverse collection of music than iTunes or YouTube. However, they may also have some drawbacks, such as lower quality, limited availability, or legal issues. Here is how you can download love by ali jita from other sources:
-
-
Visit other websites that offer free or paid music downloads, such as Bandcamp, DatPiff, Free Music Archive, or The Internet Archive. There are many websites that offer free or paid music downloads for various genres and artists. Some of them are Bandcamp, DatPiff, Free Music Archive, The Internet Archive, Jamendo, SoundClick, and Audiomack. You can use any of them as long as they are reliable and legal.
-
Search for love by ali jita or browse through the categories and genres of music available. Once you visit the website of your choice, you can use the search bar to type in the name of the song, album, or artist. You can also browse through different categories and genres of music available on the website.
-
Click on the download button or link and choose the format and quality of the music file. Once you find love by ali jita, you will see a download button or link next to it. Click on it and choose the format and quality of the music file that you want to download. Some websites may offer different formats, such as MP3, WAV, FLAC, or OGG. Some websites may also offer different quality levels, such as 128 kbps, 256 kbps, or 320 kbps.
-
Save the music file to your device or transfer it to another location or service. After downloading the music file, you will see a link or a button to save it to your device. Click on it and choose a location where you want to save it. You can also transfer it to another device or service, such as Dropbox, Google Drive, or SoundCloud.
-
-
Congratulations, you have successfully downloaded love by ali jita from other sources. Enjoy listening to the song and share it with your friends and family.
-
Conclusion
-
In this article, we have shown you how to download love by ali jita from three different sources: iTunes, YouTube, and other websites. We have also provided you with some tips and warnings for downloading music online safely and legally. Downloading music online is a convenient and affordable way to enjoy your favorite songs anytime and anywhere. However, you should also be aware of the potential risks and challenges that come with it.
-
Here are some tips and warnings for downloading music online:
-
-
Always check the reliability and legality of the website or app that you are using to download music. Avoid websites or apps that are suspicious, unknown, or have negative reviews.
-
Always respect the rights of the artists and creators of the music that you are downloading. Do not download music that is protected by copyright or that requires permission from the artist or creator.
-
Always scan the music file that you are downloading for malware or viruses. Use a reputable antivirus software or app to protect your device and data from potential harm.
-
Always backup your music files to another location or service. Use a cloud storage service or an external hard drive to store your music files in case of loss or damage.
-
Always enjoy your music responsibly and ethically. Do not use your music for illegal or immoral purposes. Do not share your music with others without their consent or knowledge.
-
-
We hope that this article has helped you learn how to download love by ali jita from different sources. We also hope that you have enjoyed listening to the song and that you have discovered other Hausa songs and artists that you like. If you have any feedback or suggestions for other music downloads, please let us know in the comments below.
-
Frequently Asked Questions
-
Here are some frequently asked questions about downloading love by ali jita:
-
-
What is love by ali jita? Love by ali jita is a romantic song by Ali Jita, one of the most popular Hausa musicians in Nigeria. The song was released in 2018 and has been ranked by BBC Hausa as the second-best Hausa music of that year. The song has also been used in several Kannywood movies, making it even more popular among Hausa speakers and lovers.
-
Who is Ali Jita? Ali Jita is a Nigerian singer, songwriter, producer, and actor who specializes in Hausa music. He was born in Kano State in 1983 and started his musical career in 2005. He has released several albums and singles, such as Mata Ku Dau Turame, Love, Soyayya Dadi, Kece Flavour, and Hafeez. He has also won several awards and recognition for his work, such as Arewa Music Award, City People Music Award, MTN Music Award, and Kannywood Award.
-
What is Hausa music? Hausa music is a type of music that originates from the Hausa people of northern Nigeria and southern Niger. Hausa music is influenced by Arabic, Islamic, African, and Western musical traditions. Hausa music is characterized by its use of instruments such as drums, flutes, guitars, keyboards, violins, trumpets, saxophones, and synthesizers. Hausa music is also known for its use of lyrics that express love, praise, religion, culture, history, politics, and social issues.
-
What are some other popular Hausa songs and artists? Some other popular Hausa songs and artists are:
-
-
Mata Ku Dau Turame by Nura M Inuwa
-
Soyayya Dadi by Umar M Shareef
-
Kece Flavour by Ali Jita
-
Hafeez by Ali Jita
-
Mai Zamani by Nura M Inuwa
-
Yarinya by Classiq
-
Zan Rayu Dake by Morell
-
Mun Gode by Solomon Lange
-
Babban Rana by Ibrahim Yala
-
Yar Fulani by Umar M Shareef
-
Inda Rai by 2Baba
-
-
How can I download music online safely and legally? To download music online safely and legally, you should follow these tips and warnings:
-
-
Always check the reliability and legality of the website or app that you are using to download music. Avoid websites or apps that are suspicious, unknown, or have negative reviews.
-
Always respect the rights of the artists and creators of the music that you are downloading. Do not download music that is protected by copyright or that requires permission from the artist or creator.
-
Always scan the music file that you are downloading for malware or viruses. Use a reputable antivirus software or app to protect your device and data from potential harm.
-
Always backup your music files to another location or service. Use a cloud storage service or an external hard drive to store your music files in case of loss or damage.
-
Always enjoy your music responsibly and ethically. Do not use your music for illegal or immoral purposes. Do not share your music with others without their consent or knowledge.
-
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/prod_cons.h b/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/prod_cons.h
deleted file mode 100644
index c9004bb8043a12e32814436baa6262a00c8ef68e..0000000000000000000000000000000000000000
--- a/spaces/fb700/chat3/crazy_functions/test_project/cpp/cppipc/prod_cons.h
+++ /dev/null
@@ -1,433 +0,0 @@
-#pragma once
-
-#include <atomic>
-#include <utility>
-#include <cstring>
-#include <type_traits>
-#include <cstdint>
-
-#include "libipc/def.h"
-
-#include "libipc/platform/detail.h"
-#include "libipc/circ/elem_def.h"
-#include "libipc/utility/log.h"
-#include "libipc/utility/utility.h"
-
-namespace ipc {
-
-////////////////////////////////////////////////////////////////
-/// producer-consumer implementation
-////////////////////////////////////////////////////////////////
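-// Each specialization below implements a lock-free ring buffer for a
-// particular producer/consumer relationship. Read and write indices are
-// kept in cache-line-aligned atomics to avoid false sharing, and writers
-// publish elements to readers with release/acquire memory orderings.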
-
-template
-struct prod_cons_impl;
-
-template <>
-struct prod_cons_impl> {
-
- template
- struct elem_t {
- std::aligned_storage_t data_ {};
- };
-
- alignas(cache_line_size) std::atomic rd_; // read index
- alignas(cache_line_size) std::atomic wt_; // write index
-
- constexpr circ::u2_t cursor() const noexcept {
- return 0;
- }
-
- template
- bool push(W* /*wrapper*/, F&& f, E* elems) {
- auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
- if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
- return false; // full
- }
- std::forward(f)(&(elems[cur_wt].data_));
- wt_.fetch_add(1, std::memory_order_release);
- return true;
- }
-
- /**
- * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
- * So we could just disconnect all connections of receiver, and return false.
- */
- template
- bool force_push(W* wrapper, F&&, E*) {
- wrapper->elems()->disconnect_receiver(~static_cast(0u));
- return false;
- }
-
- template
- bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
- auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
- if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
- return false; // empty
- }
- std::forward(f)(&(elems[cur_rd].data_));
- std::forward(out)(true);
- rd_.fetch_add(1, std::memory_order_release);
- return true;
- }
-};
-
-template <>
-struct prod_cons_impl>
- : prod_cons_impl> {
-
- template
- bool force_push(W* wrapper, F&&, E*) {
- wrapper->elems()->disconnect_receiver(1);
- return false;
- }
-
- template class E, std::size_t DS, std::size_t AS>
- bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
- byte_t buff[DS];
- for (unsigned k = 0;;) {
- auto cur_rd = rd_.load(std::memory_order_relaxed);
- if (circ::index_of(cur_rd) ==
- circ::index_of(wt_.load(std::memory_order_acquire))) {
- return false; // empty
- }
- std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
- if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
- std::forward(f)(buff);
- std::forward(out)(true);
- return true;
- }
- ipc::yield(k);
- }
- }
-};
-
-template <>
-struct prod_cons_impl>
- : prod_cons_impl> {
-
- using flag_t = std::uint64_t;
-
- template
- struct elem_t {
- std::aligned_storage_t data_ {};
- std::atomic f_ct_ { 0 }; // commit flag
- };
-
- alignas(cache_line_size) std::atomic ct_; // commit index
-
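-    // Push protocol: a writer first claims a slot with a CAS on the commit
-    // index ct_, fills in the payload, then marks the slot committed through
-    // the per-element flag f_ct_. The shared write index wt_ is only advanced
-    // once claimed slots are committed, so readers never observe a
-    // half-written element.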
- template
- bool push(W* /*wrapper*/, F&& f, E* elems) {
- circ::u2_t cur_ct, nxt_ct;
- for (unsigned k = 0;;) {
- cur_ct = ct_.load(std::memory_order_relaxed);
- if (circ::index_of(nxt_ct = cur_ct + 1) ==
- circ::index_of(rd_.load(std::memory_order_acquire))) {
- return false; // full
- }
- if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
- break;
- }
- ipc::yield(k);
- }
- auto* el = elems + circ::index_of(cur_ct);
- std::forward(f)(&(el->data_));
- // set flag & try update wt
- el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release);
- while (1) {
- auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
- if (cur_ct != wt_.load(std::memory_order_relaxed)) {
- return true;
- }
- if ((~cac_ct) != cur_ct) {
- return true;
- }
- if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
- return true;
- }
- wt_.store(nxt_ct, std::memory_order_release);
- cur_ct = nxt_ct;
- nxt_ct = cur_ct + 1;
- el = elems + circ::index_of(cur_ct);
- }
- return true;
- }
-
- template
- bool force_push(W* wrapper, F&&, E*) {
- wrapper->elems()->disconnect_receiver(1);
- return false;
- }
-
- template class E, std::size_t DS, std::size_t AS>
- bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
- byte_t buff[DS];
- for (unsigned k = 0;;) {
- auto cur_rd = rd_.load(std::memory_order_relaxed);
- auto cur_wt = wt_.load(std::memory_order_acquire);
- auto id_rd = circ::index_of(cur_rd);
- auto id_wt = circ::index_of(cur_wt);
- if (id_rd == id_wt) {
- auto* el = elems + id_wt;
- auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
- if ((~cac_ct) != cur_wt) {
- return false; // empty
- }
- if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
- wt_.store(cur_wt + 1, std::memory_order_release);
- }
- k = 0;
- }
- else {
- std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
- if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
- std::forward(f)(buff);
- std::forward(out)(true);
- return true;
- }
- ipc::yield(k);
- }
- }
- }
-};
-
-template <>
-struct prod_cons_impl> {
-
- using rc_t = std::uint64_t;
-
- enum : rc_t {
- ep_mask = 0x00000000ffffffffull,
- ep_incr = 0x0000000100000000ull
- };
-
- template
- struct elem_t {
- std::aligned_storage_t data_ {};
- std::atomic rc_ { 0 }; // read-counter
- };
-
- alignas(cache_line_size) std::atomic wt_; // write index
- alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
-
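-    // rc_ packs a bitmask of the readers that still have to consume this
-    // element (the low 32 bits selected by ep_mask) together with the
-    // writer's epoch (the high 32 bits). push() refuses to overwrite a slot
-    // until every connected reader has cleared its bit in pop(), while
-    // force_push() bumps the epoch and disconnects readers that never finished.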
- circ::u2_t cursor() const noexcept {
- return wt_.load(std::memory_order_acquire);
- }
-
- template
- bool push(W* wrapper, F&& f, E* elems) {
- E* el;
- for (unsigned k = 0;;) {
- circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
- if (cc == 0) return false; // no reader
- el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
- // check all consumers have finished reading this element
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- circ::cc_t rem_cc = cur_rc & ep_mask;
- if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
- return false; // has not finished yet
- }
- // consider rem_cc to be 0 here
- if (el->rc_.compare_exchange_weak(
- cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) {
- break;
- }
- ipc::yield(k);
- }
- std::forward(f)(&(el->data_));
- wt_.fetch_add(1, std::memory_order_release);
- return true;
- }
-
- template
- bool force_push(W* wrapper, F&& f, E* elems) {
- E* el;
- epoch_ += ep_incr;
- for (unsigned k = 0;;) {
- circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
- if (cc == 0) return false; // no reader
- el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
- // check all consumers have finished reading this element
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- circ::cc_t rem_cc = cur_rc & ep_mask;
- if (cc & rem_cc) {
- ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
- cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
- if (cc == 0) return false; // no reader
- }
- // just compare & exchange
- if (el->rc_.compare_exchange_weak(
- cur_rc, epoch_ | static_cast(cc), std::memory_order_release)) {
- break;
- }
- ipc::yield(k);
- }
- std::forward(f)(&(el->data_));
- wt_.fetch_add(1, std::memory_order_release);
- return true;
- }
-
- template
- bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
- if (cur == cursor()) return false; // acquire
- auto* el = elems + circ::index_of(cur++);
- std::forward(f)(&(el->data_));
- for (unsigned k = 0;;) {
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- if ((cur_rc & ep_mask) == 0) {
- std::forward(out)(true);
- return true;
- }
- auto nxt_rc = cur_rc & ~static_cast(wrapper->connected_id());
- if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
- std::forward(out)((nxt_rc & ep_mask) == 0);
- return true;
- }
- ipc::yield(k);
- }
- }
-};
-
-template <>
-struct prod_cons_impl> {
-
- using rc_t = std::uint64_t;
- using flag_t = std::uint64_t;
-
- enum : rc_t {
- rc_mask = 0x00000000ffffffffull,
- ep_mask = 0x00ffffffffffffffull,
- ep_incr = 0x0100000000000000ull,
- ic_mask = 0xff000000ffffffffull,
- ic_incr = 0x0000000100000000ull
- };
-
- template
- struct elem_t {
- std::aligned_storage_t data_ {};
- std::atomic rc_ { 0 }; // read-counter
- std::atomic f_ct_ { 0 }; // commit flag
- };
-
- alignas(cache_line_size) std::atomic ct_; // commit index
- alignas(cache_line_size) std::atomic epoch_ { 0 };
-
- circ::u2_t cursor() const noexcept {
- return ct_.load(std::memory_order_acquire);
- }
-
- constexpr static rc_t inc_rc(rc_t rc) noexcept {
- return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
- }
-
- constexpr static rc_t inc_mask(rc_t rc) noexcept {
- return inc_rc(rc) & ~rc_mask;
- }
-
- template
- bool push(W* wrapper, F&& f, E* elems) {
- E* el;
- circ::u2_t cur_ct;
- rc_t epoch = epoch_.load(std::memory_order_acquire);
- for (unsigned k = 0;;) {
- circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
- if (cc == 0) return false; // no reader
- el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
- // check all consumers have finished reading this element
- auto cur_rc = el->rc_.load(std::memory_order_relaxed);
- circ::cc_t rem_cc = cur_rc & rc_mask;
- if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
- return false; // has not finished yet
- }
- else if (!rem_cc) {
- auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
- if ((cur_fl != cur_ct) && cur_fl) {
- return false; // full
- }
- }
- // consider rem_cc to be 0 here
- if (el->rc_.compare_exchange_weak(
- cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed) &&
- epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
- break;
- }
- ipc::yield(k);
- }
- // only one thread/process would touch here at one time
- ct_.store(cur_ct + 1, std::memory_order_release);
- std::forward(f)(&(el->data_));
- // set flag & try update wt
- el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release);
- return true;
- }
-
- template
- bool force_push(W* wrapper, F&& f, E* elems) {
- E* el;
- circ::u2_t cur_ct;
- rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
- for (unsigned k = 0;;) {
- circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
- if (cc == 0) return false; // no reader
- el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
- // check all consumers have finished reading this element
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- circ::cc_t rem_cc = cur_rc & rc_mask;
- if (cc & rem_cc) {
- ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
- cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
- if (cc == 0) return false; // no reader
- }
- // just compare & exchange
- if (el->rc_.compare_exchange_weak(
- cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast(cc), std::memory_order_relaxed)) {
- if (epoch == epoch_.load(std::memory_order_acquire)) {
- break;
- }
- else if (push(wrapper, std::forward(f), elems)) {
- return true;
- }
- epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
- }
- ipc::yield(k);
- }
- // only one thread/process would touch here at one time
- ct_.store(cur_ct + 1, std::memory_order_release);
- std::forward(f)(&(el->data_));
- // set flag & try update wt
- el->f_ct_.store(~static_cast(cur_ct), std::memory_order_release);
- return true;
- }
-
- template
- bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
- auto* el = elems + circ::index_of(cur);
- auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
- if (cur_fl != ~static_cast(cur)) {
- return false; // empty
- }
- ++cur;
- std::forward(f)(&(el->data_));
- for (unsigned k = 0;;) {
- auto cur_rc = el->rc_.load(std::memory_order_acquire);
- if ((cur_rc & rc_mask) == 0) {
- std::forward(out)(true);
- el->f_ct_.store(cur + N - 1, std::memory_order_release);
- return true;
- }
- auto nxt_rc = inc_rc(cur_rc) & ~static_cast(wrapper->connected_id());
- bool last_one = false;
- if ((last_one = (nxt_rc & rc_mask) == 0)) {
- el->f_ct_.store(cur + N - 1, std::memory_order_release);
- }
- if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
- std::forward(out)(last_one);
- return true;
- }
- ipc::yield(k);
- }
- }
-};
-
-} // namespace ipc
diff --git a/spaces/fclong/summary/fengshen/models/megatron_t5/__init__.py b/spaces/fclong/summary/fengshen/models/megatron_t5/__init__.py
deleted file mode 100644
index 84f78136331c5ef4975697bc6a77910bba7429bd..0000000000000000000000000000000000000000
--- a/spaces/fclong/summary/fengshen/models/megatron_t5/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The IDEA Authors. All rights reserved.
-
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-
-# http://www.apache.org/licenses/LICENSE-2.0
-
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import TYPE_CHECKING
-
-from transformers.file_utils import _LazyModule, is_torch_available
-
-
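-# Only the config and tokenizer symbols are registered unconditionally; the
-# torch-backed model classes are added when torch is available, and
-# _LazyModule defers the actual imports until an attribute is first accessed.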
-_import_structure = {
- "configuration_megatron_t5": ["T5Config"],
- "tokenization_megatron_t5": ["T5Tokenizer"],
-}
-
-if is_torch_available():
- _import_structure["modeling_megatron_t5"] = [
- "T5Model",
- "T5EncoderModel",
- "T5ForConditionalGeneration"
- ]
-
-
-if TYPE_CHECKING:
- from .configuration_megatron_t5 import T5Config
- from .tokenization_megatron_t5 import T5Tokenizer
-
- if is_torch_available():
- from .modeling_megatron_t5 import (
- T5Model,
- T5EncoderModel,
- T5ForConditionalGeneration
- )
-
-else:
- import sys
-
- sys.modules[__name__] = _LazyModule(
- __name__, globals()["__file__"], _import_structure)
diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/models/encoders/__init__.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/models/encoders/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ferdmartin/GradApplicationDocsApp2/custom_models.py b/spaces/ferdmartin/GradApplicationDocsApp2/custom_models.py
deleted file mode 100644
index c06e3b5979569ee1e192f1c6d400ef25d9b49b6a..0000000000000000000000000000000000000000
--- a/spaces/ferdmartin/GradApplicationDocsApp2/custom_models.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from typing import Optional
-from transformers import PreTrainedModel, PretrainedConfig, DistilBertModel, BertModel
-import torch
-from torch import nn
-
-
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
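-# Both wrappers below put a dropout layer and a 2-way linear classification
-# head on top of a pretrained (Distil)BERT encoder, classifying from the
-# [CLS]/pooled representation of the input sequence.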
-class TransformerBasedModelDistilBert(nn.Module):
- def __init__(self):
- super(TransformerBasedModelDistilBert, self).__init__()
- self.bert = DistilBertModel.from_pretrained('distilbert-base-uncased')
- self.dropout = nn.Dropout(0.55)
- self.fc = nn.Linear(768, 2)
-
- def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
- input_shape = input_ids.size()
- if attention_mask is None:
- attention_mask = torch.ones(input_shape, device=device)
-
- outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
- pooled_output = outputs.last_hidden_state[:, 0, :]
- pooled_output = self.dropout(pooled_output)
- logits = self.fc(pooled_output)
- return logits
-
-class TransformerBasedModelBert(nn.Module):
- def __init__(self):
- super(TransformerBasedModelBert, self).__init__()
- self.bert = BertModel.from_pretrained('bert-base-uncased')
- self.dropout = nn.Dropout(0.55)
- self.fc = nn.Linear(768, 2)
-
- def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
- input_shape = input_ids.size()
- if attention_mask is None:
- attention_mask = torch.ones(input_shape, device=device)
-
- outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
- pooled_output = outputs[1]
- pooled_output = self.dropout(pooled_output)
- logits = self.fc(pooled_output)
- return logits
-
-class MyConfigDistil(PretrainedConfig):
- model_type = "distilbert"
- def __init__(self, final_dropout=0.55, **kwargs):
- super().__init__(**kwargs)
- self.final_dropout = final_dropout
-
-class MyConfig(PretrainedConfig):
- model_type = "bert"
- def __init__(self, final_dropout=0.55, **kwargs):
- super().__init__(**kwargs)
- self.final_dropout = final_dropout
-
-class MyHFModel_DistilBertBased(PreTrainedModel):
- config_class = MyConfigDistil
- def __init__(self, config):
- super().__init__(config)
- self.config = config
- self.model = TransformerBasedModelDistilBert()
- def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
- input_shape = input_ids.size()
- if attention_mask is None:
- attention_mask = torch.ones(input_shape, device=device)
-
- return self.model(input_ids=input_ids, attention_mask=attention_mask)
-
-class MyHFModel_BertBased(PreTrainedModel):
- config_class = MyConfig
- def __init__(self, config):
- super().__init__(config)
- self.config = config
- self.model = TransformerBasedModelBert()
- def forward(self, input_ids: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
- input_shape = input_ids.size()
- if attention_mask is None:
- attention_mask = torch.ones(input_shape, device=device)
-
- return self.model(input_ids=input_ids, attention_mask=attention_mask)
-
-config = MyConfigDistil(0.55)
-HF_DistilBertBasedModelAppDocs = MyHFModel_DistilBertBased(config)
-
-config_db = MyConfig(0.55)
-HF_BertBasedModelAppDocs = MyHFModel_BertBased(config_db)
\ No newline at end of file
diff --git a/spaces/fffiloni/SplitTrack2MusicGen/tests/modules/test_seanet.py b/spaces/fffiloni/SplitTrack2MusicGen/tests/modules/test_seanet.py
deleted file mode 100644
index e5c51b340a2f94fb2828b14daf83d5fad645073d..0000000000000000000000000000000000000000
--- a/spaces/fffiloni/SplitTrack2MusicGen/tests/modules/test_seanet.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-from itertools import product
-
-import pytest
-import torch
-
-from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder, SEANetResnetBlock
-from audiocraft.modules import StreamableConv1d, StreamableConvTranspose1d
-
-
-class TestSEANetModel:
-
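-    # Each round-trip test below feeds a 1-second mono waveform (24000 samples)
-    # through encoder + decoder, checking the latent shape [1, 128, 75] and that
-    # the reconstruction keeps the input shape.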
- def test_base(self):
- encoder = SEANetEncoder()
- decoder = SEANetDecoder()
-
- x = torch.randn(1, 1, 24000)
- z = encoder(x)
- assert list(z.shape) == [1, 128, 75], z.shape
- y = decoder(z)
- assert y.shape == x.shape, (x.shape, y.shape)
-
- def test_causal(self):
- encoder = SEANetEncoder(causal=True)
- decoder = SEANetDecoder(causal=True)
- x = torch.randn(1, 1, 24000)
-
- z = encoder(x)
- assert list(z.shape) == [1, 128, 75], z.shape
- y = decoder(z)
- assert y.shape == x.shape, (x.shape, y.shape)
-
- def test_conv_skip_connection(self):
- encoder = SEANetEncoder(true_skip=False)
- decoder = SEANetDecoder(true_skip=False)
-
- x = torch.randn(1, 1, 24000)
- z = encoder(x)
- assert list(z.shape) == [1, 128, 75], z.shape
- y = decoder(z)
- assert y.shape == x.shape, (x.shape, y.shape)
-
- def test_seanet_encoder_decoder_final_act(self):
- encoder = SEANetEncoder(true_skip=False)
- decoder = SEANetDecoder(true_skip=False, final_activation='Tanh')
-
- x = torch.randn(1, 1, 24000)
- z = encoder(x)
- assert list(z.shape) == [1, 128, 75], z.shape
- y = decoder(z)
- assert y.shape == x.shape, (x.shape, y.shape)
-
- def _check_encoder_blocks_norm(self, encoder: SEANetEncoder, n_disable_blocks: int, norm: str):
- n_blocks = 0
- for layer in encoder.model:
- if isinstance(layer, StreamableConv1d):
- n_blocks += 1
-                assert layer.conv.norm_type == ('none' if n_blocks <= n_disable_blocks else norm)
- elif isinstance(layer, SEANetResnetBlock):
- for resnet_layer in layer.block:
- if isinstance(resnet_layer, StreamableConv1d):
- # here we add + 1 to n_blocks as we increment n_blocks just after the block
-                        assert resnet_layer.conv.norm_type == ('none' if (n_blocks + 1) <= n_disable_blocks else norm)
-
- def test_encoder_disable_norm(self):
- n_residuals = [0, 1, 3]
- disable_blocks = [0, 1, 2, 3, 4, 5, 6]
- norms = ['weight_norm', 'none']
- for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms):
- encoder = SEANetEncoder(n_residual_layers=n_res, norm=norm,
- disable_norm_outer_blocks=disable_blocks)
- self._check_encoder_blocks_norm(encoder, disable_blocks, norm)
-
- def _check_decoder_blocks_norm(self, decoder: SEANetDecoder, n_disable_blocks: int, norm: str):
- n_blocks = 0
- for layer in decoder.model:
- if isinstance(layer, StreamableConv1d):
- n_blocks += 1
- assert layer.conv.norm_type == ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
- elif isinstance(layer, StreamableConvTranspose1d):
- n_blocks += 1
- assert layer.convtr.norm_type == ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
- elif isinstance(layer, SEANetResnetBlock):
- for resnet_layer in layer.block:
- if isinstance(resnet_layer, StreamableConv1d):
- assert resnet_layer.conv.norm_type == \
- ('none' if (decoder.n_blocks - n_blocks) < n_disable_blocks else norm)
-
- def test_decoder_disable_norm(self):
- n_residuals = [0, 1, 3]
- disable_blocks = [0, 1, 2, 3, 4, 5, 6]
- norms = ['weight_norm', 'none']
- for n_res, disable_blocks, norm in product(n_residuals, disable_blocks, norms):
- decoder = SEANetDecoder(n_residual_layers=n_res, norm=norm,
- disable_norm_outer_blocks=disable_blocks)
- self._check_decoder_blocks_norm(decoder, disable_blocks, norm)
-
- def test_disable_norm_raises_exception(self):
- # Invalid disable_norm_outer_blocks values raise exceptions
- with pytest.raises(AssertionError):
- SEANetEncoder(disable_norm_outer_blocks=-1)
-
- with pytest.raises(AssertionError):
- SEANetEncoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7)
-
- with pytest.raises(AssertionError):
- SEANetDecoder(disable_norm_outer_blocks=-1)
-
- with pytest.raises(AssertionError):
- SEANetDecoder(ratios=[1, 1, 2, 2], disable_norm_outer_blocks=7)
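A note on the shape assertions in `test_base` above: the 24000-sample input maps to a latent of shape (1, 128, 75) because the default SEANet configuration (assumed here to be `dimension=128` and `ratios=[8, 5, 4, 2]`) downsamples by a total factor of 8 * 5 * 4 * 2 = 320, and 24000 / 320 = 75. A minimal sketch under those assumed defaults:

```python
import torch
from audiocraft.modules.seanet import SEANetEncoder, SEANetDecoder

# Assumed defaults: dimension=128, ratios=[8, 5, 4, 2] -> total stride 8*5*4*2 = 320.
encoder = SEANetEncoder()
decoder = SEANetDecoder()

x = torch.randn(1, 1, 24000)   # (batch, channels, samples)
z = encoder(x)                 # expected shape: (1, 128, 24000 // 320) == (1, 128, 75)
y = decoder(z)                 # the decoder mirrors the encoder, so y.shape == x.shape
print(z.shape, y.shape)
```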
diff --git a/spaces/flax-community/Multilingual-VQA/apps/model/flax_clip_vision_bert/__init__.py b/spaces/flax-community/Multilingual-VQA/apps/model/flax_clip_vision_bert/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/flax-community/roberta-hindi/About/results.md b/spaces/flax-community/roberta-hindi/About/results.md
deleted file mode 100644
index 71505284131e2b9de62b2170f2d267fed109b337..0000000000000000000000000000000000000000
--- a/spaces/flax-community/roberta-hindi/About/results.md
+++ /dev/null
@@ -1,10 +0,0 @@
-## Evaluation Results
-
-RoBERTa Hindi is evaluated on downstream tasks. The results are summarized below.
-
-| Task | Task Type | IndicBERT | HindiBERTa | Indic Transformers Hindi BERT | RoBERTa Hindi Guj San | RoBERTa Hindi (ours) |
-|-------------------------|----------------------|-----------|------------|-------------------------------|-----------------------|---------------|
-| BBC News Classification | Genre Classification | **76.44** | 66.86 | **77.6** | 64.9 | 73.67 |
-| WikiNER | Token Classification | - | 90.68 | **95.09** | 89.61 | **92.76** |
-| IITP Product Reviews | Sentiment Analysis | **78.01** | 73.23 | **78.39** | 66.16 | 75.53 |
-| IITP Movie Reviews | Sentiment Analysis | 60.97 | 52.26 | **70.65** | 49.35 | **61.29** |
\ No newline at end of file
diff --git a/spaces/floriankrempl/mtg_rules_bot/mtg/utils/__init__.py b/spaces/floriankrempl/mtg_rules_bot/mtg/utils/__init__.py
deleted file mode 100644
index 1849b7d2f5782bdc0a90c9362b1e49a17cb0f48d..0000000000000000000000000000000000000000
--- a/spaces/floriankrempl/mtg_rules_bot/mtg/utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .utils import get_openai_api_key
diff --git a/spaces/florim/MedGPT/autogpt/permanent_memory/__init__.py b/spaces/florim/MedGPT/autogpt/permanent_memory/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/freddyaboulton/llama2-70b-discord-bot/README.md b/spaces/freddyaboulton/llama2-70b-discord-bot/README.md
deleted file mode 100644
index d3d306b8cf29e5d8bbdadd4ebba09358ccc5f3dc..0000000000000000000000000000000000000000
--- a/spaces/freddyaboulton/llama2-70b-discord-bot/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Llama2 70b Discord Bot
-emoji: 🏆
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.38.0
-app_file: app.py
-pinned: false
-tags:
-- gradio-discord-bot
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/geraldvillaran/dolly-chat/app.py b/spaces/geraldvillaran/dolly-chat/app.py
deleted file mode 100644
index 5edcd8bbdfd2095cba693dcb7a7ce19f1ebd94fd..0000000000000000000000000000000000000000
--- a/spaces/geraldvillaran/dolly-chat/app.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import gradio as gr
-import torch
-from transformers import pipeline
-ans = pipeline(model="databricks/dolly-v2-3b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto")
-def answer(query):
- out = ans(query)
- return out
-demo = gr.Interface(fn=answer, inputs='text', outputs='text', examples=[['What is the capital of India?']])
-demo.launch()
diff --git a/spaces/goliathaiconsulting/airbnb-search-engine/README.md b/spaces/goliathaiconsulting/airbnb-search-engine/README.md
deleted file mode 100644
index 230f0fb6cae16004bb642eb08ce8f08020958aa7..0000000000000000000000000000000000000000
--- a/spaces/goliathaiconsulting/airbnb-search-engine/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Airbnb Search Engine
-emoji: 🐢
-colorFrom: green
-colorTo: green
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/gordonchan/h2oo/gradio_utils/__init__.py b/spaces/gordonchan/h2oo/gradio_utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/gossminn/fillmorle-app/sftp/modules/span_typing/span_typing.py b/spaces/gossminn/fillmorle-app/sftp/modules/span_typing/span_typing.py
deleted file mode 100644
index 7a9848410d6e354c2d1598b7ee652b293b0a9314..0000000000000000000000000000000000000000
--- a/spaces/gossminn/fillmorle-app/sftp/modules/span_typing/span_typing.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from abc import ABC
-from typing import *
-
-import torch
-from allennlp.common import Registrable
-from allennlp.data.vocabulary import DEFAULT_OOV_TOKEN, Vocabulary
-from allennlp.training.metrics import CategoricalAccuracy
-
-
-class SpanTyping(Registrable, torch.nn.Module, ABC):
- """
- Models the probability p(child_label | child_span, parent_span, parent_label).
- """
- def __init__(
- self,
- n_label: int,
- label_to_ignore: Optional[List[int]] = None,
- ):
- """
- :param label_to_ignore: Label indexes in this list will be ignored.
- Usually this should include NULL, PADDING and UNKNOWN.
- """
- super().__init__()
- self.label_to_ignore = label_to_ignore or list()
- self.acc_metric = CategoricalAccuracy()
- self.onto = torch.ones([n_label, n_label], dtype=torch.bool)
- self.register_buffer('ontology', self.onto)
-
- def load_ontology(self, path: str, vocab: Vocabulary):
- unk_id = vocab.get_token_index(DEFAULT_OOV_TOKEN, 'span_label')
- for line in open(path).readlines():
- entities = [vocab.get_token_index(ent, 'span_label') for ent in line.replace('\n', '').split('\t')]
- parent, children = entities[0], entities[1:]
- if parent == unk_id:
- continue
- self.onto[parent, :] = False
- children = list(filter(lambda x: x != unk_id, children))
- self.onto[parent, children] = True
- self.register_buffer('ontology', self.onto)
-
- def forward(
- self,
- span_vec: torch.Tensor,
- parent_at_span: torch.Tensor,
- span_labels: Optional[torch.Tensor],
- prediction_only: bool = False,
- ) -> Dict[str, torch.Tensor]:
- """
- Inputs: All features for typing a child span.
- Output: The loss of typing and predictions.
- :param span_vec: Shape [batch, span, token_dim]
- :param parent_at_span: Shape [batch, span]
- :param span_labels: Shape [batch, span]
- :param prediction_only: If True, no loss returned & metric will not be updated
- :return:
- loss: Loss for label prediction. (absent if prediction_only = True)
- prediction: Predicted labels.
- """
- raise NotImplementedError
-
- def get_metric(self, reset):
- return {
- "typing_acc": self.acc_metric.get_metric(reset) * 100
- }
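For reference, `load_ontology` above expects a plain-text file in which each line lists a parent label followed by its admissible child labels, all tab-separated, with label strings drawn from the `span_label` vocabulary namespace. A minimal hypothetical example (the label names are invented purely for illustration):

```python
# Hypothetical ontology file for load_ontology(): one parent per line,
# followed by the child labels allowed under it, separated by tabs.
lines = [
    "Event\tAttack\tTransport",
    "Attack\tInstrument\tTarget",
]
with open("ontology.tsv", "w", encoding="utf-8") as fp:
    fp.write("\n".join(lines) + "\n")
```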
diff --git a/spaces/gradio/HuBERT/examples/multilingual/data_scripts/utils/dedup.py b/spaces/gradio/HuBERT/examples/multilingual/data_scripts/utils/dedup.py
deleted file mode 100644
index d6fed8c695cf218d3502d6ed8d23015520c0e179..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/examples/multilingual/data_scripts/utils/dedup.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-import argparse
-
-def deup(src_file, tgt_file, src_file_out, tgt_file_out):
- seen = set()
- dup_count = 0
- with open(src_file, encoding='utf-8') as fsrc, \
- open(tgt_file, encoding='utf-8') as ftgt, \
- open(src_file_out, 'w', encoding='utf-8') as fsrc_out, \
- open(tgt_file_out, 'w', encoding='utf-8') as ftgt_out:
- for s, t in zip(fsrc, ftgt):
- if (s, t) not in seen:
- fsrc_out.write(s)
- ftgt_out.write(t)
- seen.add((s, t))
- else:
- dup_count += 1
- print(f'number of duplicates removed: {dup_count}')
-
-
-def main():
- parser = argparse.ArgumentParser()
- parser.add_argument("--src-file", type=str, required=True,
- help="src file")
- parser.add_argument("--tgt-file", type=str, required=True,
- help="tgt file")
- parser.add_argument("--src-file-out", type=str, required=True,
- help="src ouptut file")
- parser.add_argument("--tgt-file-out", type=str, required=True,
- help="tgt ouput file")
- args = parser.parse_args()
- deup(args.src_file, args.tgt_file, args.src_file_out, args.tgt_file_out)
-
-
-if __name__ == "__main__":
- main()
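A hypothetical way to drive the script above from Python rather than from the command line; the module name and the file names are placeholders, not part of the original repository:

```python
# Illustrative only: assumes the script above is importable as `dedup`.
from dedup import deup

deup(
    src_file="train.en",
    tgt_file="train.fr",
    src_file_out="train.dedup.en",
    tgt_file_out="train.dedup.fr",
)
```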
diff --git a/spaces/gradio/HuBERT/fairseq/modules/sparse_multihead_attention.py b/spaces/gradio/HuBERT/fairseq/modules/sparse_multihead_attention.py
deleted file mode 100644
index 3cbd9d6785886e319aab0601517e27df733b6f97..0000000000000000000000000000000000000000
--- a/spaces/gradio/HuBERT/fairseq/modules/sparse_multihead_attention.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-
-from .multihead_attention import MultiheadAttention
-
-
-class SparseMultiheadAttention(MultiheadAttention):
- """Sparse Multi-Headed Attention.
-
- "Generating Long Sequences with Sparse Transformers". Implements
- fixed factorized self attention, where l=stride and c=expressivity.
- A(1) includes all words in the stride window and A(2) takes a summary of c
- words from the end of each stride window.
- If is_bidirectional=False, we do not include any words past the current word,
- as in the paper.
- """
-
- def __init__(
- self,
- embed_dim,
- num_heads,
- kdim=None,
- vdim=None,
- dropout=0.0,
- bias=True,
- add_bias_kv=False,
- add_zero_attn=False,
- self_attention=False,
- encoder_decoder_attention=False,
- stride=32,
- expressivity=8,
- is_bidirectional=True,
- ):
-
- super().__init__(
- embed_dim,
- num_heads,
- kdim,
- vdim,
- dropout,
- bias,
- add_bias_kv,
- add_zero_attn,
- self_attention,
- encoder_decoder_attention,
- )
-
- self.is_bidirectional = is_bidirectional
- self.stride = stride
- self.expressivity = expressivity
- assert self.stride > 0 and self.stride >= self.expressivity
-
- # Used for Ai(2) calculations - beginning of [l-c, l] range
- def compute_checkpoint(self, word_index):
- if word_index % self.stride == 0 and word_index != 0:
- checkpoint_index = word_index - self.expressivity
- else:
- checkpoint_index = (
- math.floor(word_index / self.stride) * self.stride
- + self.stride
- - self.expressivity
- )
- return checkpoint_index
-
- # Computes Ai(2)
- def compute_subset_summaries(self, absolute_max):
- checkpoint_index = self.compute_checkpoint(0)
- subset_two = set()
- while checkpoint_index <= absolute_max - 1:
- summary = set(
- range(
- checkpoint_index,
- min(checkpoint_index + self.expressivity + 1, absolute_max),
- )
- )
- subset_two = subset_two.union(summary)
- checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride)
- return subset_two
-
- # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf
- def compute_fixed_attention_subset(self, word_index, tgt_len):
- # +1s account for range function; [min, max) -> [min, max]
- if not self.is_bidirectional:
- absolute_max = word_index + 1
- else:
- absolute_max = tgt_len
-
- # Subset 1 - whole window
- rounded_index = (
- math.floor((word_index + self.stride) / self.stride) * self.stride
- )
- if word_index % self.stride == 0 and word_index != 0:
- subset_one = set(
- range(word_index - self.stride, min(absolute_max, word_index + 1))
- )
- else:
- subset_one = set(
- range(
- max(0, rounded_index - self.stride),
- min(absolute_max, rounded_index + 1),
- )
- )
-
- # Subset 2 - summary per window
- # If bidirectional, subset 2 is the same for every index
- subset_two = set()
- if not self.is_bidirectional:
- subset_two = self.compute_subset_summaries(absolute_max)
-
- return subset_one.union(subset_two)
-
- # Compute sparse mask - if bidirectional, can pre-compute and store
- def buffered_sparse_mask(self, tensor, tgt_len, src_len):
- assert tgt_len > self.stride
- sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf"))
-
- # If bidirectional, subset 2 is the same for every index
- subset_summaries = set()
- if self.is_bidirectional:
- subset_summaries = self.compute_subset_summaries(tgt_len)
-
- for i in range(tgt_len):
- fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len)
- fixed_attention_subset = fixed_attention_subset.union(subset_summaries)
- included_word_indices = torch.LongTensor(list(fixed_attention_subset))
- sparse_mask[i].index_fill_(0, included_word_indices, 0)
- return sparse_mask.type_as(tensor)
-
- def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
- sparse_mask = self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
- sparse_mask = sparse_mask.unsqueeze(0).expand(
- bsz * self.num_heads, tgt_len, src_len
- )
- attn_weights += sparse_mask
diff --git a/spaces/gradio/chatbot_simple/run.py b/spaces/gradio/chatbot_simple/run.py
deleted file mode 100644
index 9344665c7332e12e585d51e77758f0ed41b40797..0000000000000000000000000000000000000000
--- a/spaces/gradio/chatbot_simple/run.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import gradio as gr
-import random
-import time
-
-with gr.Blocks() as demo:
- chatbot = gr.Chatbot()
- msg = gr.Textbox()
- clear = gr.ClearButton([msg, chatbot])
-
- def respond(message, chat_history):
- bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
- chat_history.append((message, bot_message))
- time.sleep(2)
- return "", chat_history
-
- msg.submit(respond, [msg, chatbot], [msg, chatbot])
-
-if __name__ == "__main__":
- demo.launch()
diff --git a/spaces/gwang-kim/DATID-3D/pose_estimation/models/base_model.py b/spaces/gwang-kim/DATID-3D/pose_estimation/models/base_model.py
deleted file mode 100644
index 478d6d0d1795c180153ffd4ff7dcfbbaa328b4be..0000000000000000000000000000000000000000
--- a/spaces/gwang-kim/DATID-3D/pose_estimation/models/base_model.py
+++ /dev/null
@@ -1,317 +0,0 @@
-"""This script defines the base network model for Deep3DFaceRecon_pytorch
-"""
-
-import os
-import torch
-from collections import OrderedDict
-from abc import ABC, abstractmethod
-from . import networks
-
-
-class BaseModel(ABC):
- """This class is an abstract base class (ABC) for models.
- To create a subclass, you need to implement the following five functions:
- -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
- -- <set_input>: unpack data from dataset and apply preprocessing.
- -- <forward>: produce intermediate results.
- -- <optimize_parameters>: calculate losses, gradients, and update network weights.
- -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
- """
-
- def __init__(self, opt):
- """Initialize the BaseModel class.
-
- Parameters:
- opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
-
- When creating your custom class, you need to implement your own initialization.
- In this function, you should first call <BaseModel.__init__(self, opt)>.
- Then, you need to define four lists:
- -- self.loss_names (str list): specify the training losses that you want to plot and save.
- -- self.model_names (str list): define networks used in our training.
- -- self.visual_names (str list): specify the images that you want to display and save.
- -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
- """
- self.opt = opt
- self.isTrain = opt.isTrain
- self.device = torch.device('cpu')
- self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
- self.loss_names = []
- self.model_names = []
- self.visual_names = []
- self.parallel_names = []
- self.optimizers = []
- self.image_paths = []
- self.metric = 0 # used for learning rate policy 'plateau'
-
- @staticmethod
- def dict_grad_hook_factory(add_func=lambda x: x):
- saved_dict = dict()
-
- def hook_gen(name):
- def grad_hook(grad):
- saved_vals = add_func(grad)
- saved_dict[name] = saved_vals
- return grad_hook
- return hook_gen, saved_dict
-
- @staticmethod
- def modify_commandline_options(parser, is_train):
- """Add new model-specific options, and rewrite default values for existing options.
-
- Parameters:
- parser -- original option parser
- is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
-
- Returns:
- the modified parser.
- """
- return parser
-
- @abstractmethod
- def set_input(self, input):
- """Unpack input data from the dataloader and perform necessary pre-processing steps.
-
- Parameters:
- input (dict): includes the data itself and its metadata information.
- """
- pass
-
- @abstractmethod
- def forward(self):
- """Run forward pass; called by both functions and ."""
- pass
-
- @abstractmethod
- def optimize_parameters(self):
- """Calculate losses, gradients, and update network weights; called in every training iteration"""
- pass
-
- def setup(self, opt):
- """Load and print networks; create schedulers
-
- Parameters:
- opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
- """
- if self.isTrain:
- self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
-
- if not self.isTrain or opt.continue_train:
- load_suffix = opt.epoch
- self.load_networks(load_suffix)
-
-
- # self.print_networks(opt.verbose)
-
- def parallelize(self, convert_sync_batchnorm=True):
- if not self.opt.use_ddp:
- for name in self.parallel_names:
- if 'renderer' in name:
- continue
- if isinstance(name, str):
- module = getattr(self, name)
- setattr(self, name, module.to(self.device))
- else:
- for name in self.model_names:
- if isinstance(name, str):
- module = getattr(self, name)
- if convert_sync_batchnorm:
- module = torch.nn.SyncBatchNorm.convert_sync_batchnorm(module)
- setattr(self, name, torch.nn.parallel.DistributedDataParallel(module.to(self.device),
- device_ids=[self.device.index],
- find_unused_parameters=True, broadcast_buffers=True))
-
- # DistributedDataParallel is not needed when a module doesn't have any parameter that requires a gradient.
- for name in self.parallel_names:
- if isinstance(name, str) and name not in self.model_names:
- module = getattr(self, name)
- setattr(self, name, module.to(self.device))
-
- # put state_dict of optimizer to gpu device
- if self.opt.phase != 'test':
- if self.opt.continue_train:
- for optim in self.optimizers:
- for state in optim.state.values():
- for k, v in state.items():
- if isinstance(v, torch.Tensor):
- state[k] = v.to(self.device)
-
- def data_dependent_initialize(self, data):
- pass
-
- def train(self):
- """Make models train mode"""
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- net.train()
-
- def eval(self):
- """Make models eval mode"""
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- net.eval()
-
- def test(self):
- """Forward function used in test time.
-
- This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
- It also calls <compute_visuals> to produce additional visualization results
- """
- with torch.no_grad():
- self.forward()
- # self.compute_visuals()
-
- def compute_visuals(self):
- """Calculate additional output images for visdom and HTML visualization"""
- pass
-
- def get_image_paths(self, name='A'):
- """ Return image paths that are used to load current data"""
- return self.image_paths if name =='A' else self.image_paths_B
-
- def update_learning_rate(self):
- """Update learning rates for all the networks; called at the end of every epoch"""
- for scheduler in self.schedulers:
- if self.opt.lr_policy == 'plateau':
- scheduler.step(self.metric)
- else:
- scheduler.step()
-
- lr = self.optimizers[0].param_groups[0]['lr']
- print('learning rate = %.7f' % lr)
-
- def get_current_visuals(self):
- """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
- visual_ret = OrderedDict()
- for name in self.visual_names:
- if isinstance(name, str):
- visual_ret[name] = getattr(self, name)[:, :3, ...]
- return visual_ret
-
- def get_current_losses(self):
- """Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
- errors_ret = OrderedDict()
- for name in self.loss_names:
- if isinstance(name, str):
- errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
- return errors_ret
-
- def save_networks(self, epoch):
- """Save all the networks to the disk.
-
- Parameters:
- epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
- """
- if not os.path.isdir(self.save_dir):
- os.makedirs(self.save_dir)
-
- save_filename = 'epoch_%s.pth' % (epoch)
- save_path = os.path.join(self.save_dir, save_filename)
-
- save_dict = {}
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- if isinstance(net, torch.nn.DataParallel) or isinstance(net,
- torch.nn.parallel.DistributedDataParallel):
- net = net.module
- save_dict[name] = net.state_dict()
-
-
- for i, optim in enumerate(self.optimizers):
- save_dict['opt_%02d'%i] = optim.state_dict()
-
- for i, sched in enumerate(self.schedulers):
- save_dict['sched_%02d'%i] = sched.state_dict()
-
- torch.save(save_dict, save_path)
-
- def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
- """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
- key = keys[i]
- if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
- if module.__class__.__name__.startswith('InstanceNorm') and \
- (key == 'running_mean' or key == 'running_var'):
- if getattr(module, key) is None:
- state_dict.pop('.'.join(keys))
- if module.__class__.__name__.startswith('InstanceNorm') and \
- (key == 'num_batches_tracked'):
- state_dict.pop('.'.join(keys))
- else:
- self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
-
- def load_networks(self, epoch):
- """Load all the networks from the disk.
-
- Parameters:
- epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
- """
- if self.opt.isTrain and self.opt.pretrained_name is not None:
- load_dir = os.path.join(self.opt.checkpoints_dir, self.opt.pretrained_name)
- else:
- load_dir = self.save_dir
- load_filename = 'epoch_%s.pth' % (epoch)
- load_path = os.path.join(load_dir, load_filename)
- state_dict = torch.load(load_path, map_location=self.device)
- print('loading the model from %s' % load_path)
-
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- if isinstance(net, torch.nn.DataParallel):
- net = net.module
- net.load_state_dict(state_dict[name])
-
- if self.opt.phase != 'test':
- if self.opt.continue_train:
- print('loading the optim from %s' % load_path)
- for i, optim in enumerate(self.optimizers):
- optim.load_state_dict(state_dict['opt_%02d'%i])
-
- try:
- print('loading the sched from %s' % load_path)
- for i, sched in enumerate(self.schedulers):
- sched.load_state_dict(state_dict['sched_%02d'%i])
- except:
- print('Failed to load schedulers, set schedulers according to epoch count manually')
- for i, sched in enumerate(self.schedulers):
- sched.last_epoch = self.opt.epoch_count - 1
-
-
-
-
- def print_networks(self, verbose):
- """Print the total number of parameters in the network and (if verbose) network architecture
-
- Parameters:
- verbose (bool) -- if verbose: print the network architecture
- """
- print('---------- Networks initialized -------------')
- for name in self.model_names:
- if isinstance(name, str):
- net = getattr(self, name)
- num_params = 0
- for param in net.parameters():
- num_params += param.numel()
- if verbose:
- print(net)
- print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
- print('-----------------------------------------------')
-
- def set_requires_grad(self, nets, requires_grad=False):
- """Set requies_grad=Fasle for all the networks to avoid unnecessary computations
- Parameters:
- nets (network list) -- a list of networks
- requires_grad (bool) -- whether the networks require gradients or not
- """
- if not isinstance(nets, list):
- nets = [nets]
- for net in nets:
- if net is not None:
- for param in net.parameters():
- param.requires_grad = requires_grad
-
- def generate_visuals_for_evaluation(self, data, mode):
- return {}
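As the class docstring above spells out, a concrete model has to implement `set_input`, `forward`, and `optimize_parameters` and populate the bookkeeping lists in `__init__`. A minimal, purely illustrative subclass sketch follows (the network, loss name, option fields, and input keys are hypothetical and not part of this repository):

```python
import torch
from .base_model import BaseModel  # assuming this file is importable as a sibling module


class ToyModel(BaseModel):
    """Hypothetical single-network model used only to illustrate the BaseModel contract."""

    def __init__(self, opt):
        BaseModel.__init__(self, opt)          # opt must provide isTrain, checkpoints_dir, name, ...
        self.loss_names = ['l2']               # exposed to logging as self.loss_l2
        self.model_names = ['net_toy']         # attribute names of networks to save/load
        self.visual_names = ['output']         # attribute names of outputs to display/save
        self.parallel_names = self.model_names
        self.net_toy = torch.nn.Linear(3, 3).to(self.device)
        if self.isTrain:
            self.optimizers = [torch.optim.Adam(self.net_toy.parameters(), lr=1e-4)]

    def set_input(self, input):
        # The input dict keys are an assumption of this sketch.
        self.input = input['data'].to(self.device)
        self.target = input['target'].to(self.device)

    def forward(self):
        self.output = self.net_toy(self.input)

    def optimize_parameters(self):
        self.forward()
        self.loss_l2 = torch.nn.functional.mse_loss(self.output, self.target)
        self.optimizers[0].zero_grad()
        self.loss_l2.backward()
        self.optimizers[0].step()
```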
diff --git a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/dnnlib/tflib/optimizer.py b/spaces/gyugnsu/DragGan-Inversion/stylegan_human/dnnlib/tflib/optimizer.py
deleted file mode 100644
index cd130a8b5ca8e1af555365620fd01104a3be13ce..0000000000000000000000000000000000000000
--- a/spaces/gyugnsu/DragGan-Inversion/stylegan_human/dnnlib/tflib/optimizer.py
+++ /dev/null
@@ -1,389 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
-#
-# This work is made available under the Nvidia Source Code License-NC.
-# To view a copy of this license, visit
-# https://nvlabs.github.io/stylegan2/license.html
-
-"""Helper wrapper for a Tensorflow optimizer."""
-
-import numpy as np
-import tensorflow as tf
-
-from collections import OrderedDict
-from typing import List, Union
-
-from . import autosummary
-from . import tfutil
-from .. import util
-
-from .tfutil import TfExpression, TfExpressionEx
-
-try:
- # TensorFlow 1.13
- from tensorflow.python.ops import nccl_ops
-except:
- # Older TensorFlow versions
- import tensorflow.contrib.nccl as nccl_ops
-
-
-class Optimizer:
- """A Wrapper for tf.train.Optimizer.
-
- Automatically takes care of:
- - Gradient averaging for multi-GPU training.
- - Gradient accumulation for arbitrarily large minibatches.
- - Dynamic loss scaling and typecasts for FP16 training.
- - Ignoring corrupted gradients that contain NaNs/Infs.
- - Reporting statistics.
- - Well-chosen default settings.
- """
-
- def __init__(self,
- # Name string that will appear in TensorFlow graph.
- name: str = "Train",
- # Underlying optimizer class.
- tf_optimizer: str = "tf.train.AdamOptimizer",
- # Learning rate. Can vary over time.
- learning_rate: TfExpressionEx = 0.001,
- # Treat N consecutive minibatches as one by accumulating gradients.
- minibatch_multiplier: TfExpressionEx = None,
- # Share internal state with a previously created optimizer?
- share: "Optimizer" = None,
- # Enable dynamic loss scaling for robust mixed-precision training?
- use_loss_scaling: bool = False,
- # Log2 of initial loss scaling factor.
- loss_scaling_init: float = 64.0,
- # Log2 of per-minibatch loss scaling increment when there is no overflow.
- loss_scaling_inc: float = 0.0005,
- # Log2 of per-minibatch loss scaling decrement when there is an overflow.
- loss_scaling_dec: float = 1.0,
- # Report fine-grained memory usage statistics in TensorBoard?
- report_mem_usage: bool = False,
- **kwargs):
-
- # Public fields.
- self.name = name
- self.learning_rate = learning_rate
- self.minibatch_multiplier = minibatch_multiplier
- self.id = self.name.replace("/", ".")
- self.scope = tf.get_default_graph().unique_name(self.id)
- self.optimizer_class = util.get_obj_by_name(tf_optimizer)
- self.optimizer_kwargs = dict(kwargs)
- self.use_loss_scaling = use_loss_scaling
- self.loss_scaling_init = loss_scaling_init
- self.loss_scaling_inc = loss_scaling_inc
- self.loss_scaling_dec = loss_scaling_dec
-
- # Private fields.
- self._updates_applied = False
- self._devices = OrderedDict() # device_name => EasyDict()
- self._shared_optimizers = OrderedDict() # device_name => optimizer_class
- self._gradient_shapes = None # [shape, ...]
- self._report_mem_usage = report_mem_usage
-
- # Validate arguments.
- assert callable(self.optimizer_class)
-
- # Share internal state if requested.
- if share is not None:
- assert isinstance(share, Optimizer)
- assert self.optimizer_class is share.optimizer_class
- assert self.learning_rate is share.learning_rate
- assert self.optimizer_kwargs == share.optimizer_kwargs
- self._shared_optimizers = share._shared_optimizers # pylint: disable=protected-access
-
- def _get_device(self, device_name: str):
- """Get internal state for the given TensorFlow device."""
- tfutil.assert_tf_initialized()
- if device_name in self._devices:
- return self._devices[device_name]
-
- # Initialize fields.
- device = util.EasyDict()
- device.name = device_name
- device.optimizer = None # Underlying optimizer: optimizer_class
- device.loss_scaling_var = None # Log2 of loss scaling: tf.Variable
- # Raw gradients: var => [grad, ...]
- device.grad_raw = OrderedDict()
- device.grad_clean = OrderedDict() # Clean gradients: var => grad
- # Accumulation sums: var => tf.Variable
- device.grad_acc_vars = OrderedDict()
- device.grad_acc_count = None # Accumulation counter: tf.Variable
- device.grad_acc = OrderedDict() # Accumulated gradients: var => grad
-
- # Setup TensorFlow objects.
- with tfutil.absolute_name_scope(self.scope + "/Devices"), tf.device(device_name), tf.control_dependencies(None):
- if device_name not in self._shared_optimizers:
- optimizer_name = self.scope.replace(
- "/", "_") + "_opt%d" % len(self._shared_optimizers)
- self._shared_optimizers[device_name] = self.optimizer_class(
- name=optimizer_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
- device.optimizer = self._shared_optimizers[device_name]
- if self.use_loss_scaling:
- device.loss_scaling_var = tf.Variable(np.float32(
- self.loss_scaling_init), trainable=False, name="loss_scaling_var")
-
- # Register device.
- self._devices[device_name] = device
- return device
-
- def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
- """Register the gradients of the given loss function with respect to the given variables.
- Intended to be called once per GPU."""
- tfutil.assert_tf_initialized()
- assert not self._updates_applied
- device = self._get_device(loss.device)
-
- # Validate trainables.
- if isinstance(trainable_vars, dict):
- # allow passing in Network.trainables as vars
- trainable_vars = list(trainable_vars.values())
- assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
- assert all(tfutil.is_tf_expression(expr)
- for expr in trainable_vars + [loss])
- assert all(var.device == device.name for var in trainable_vars)
-
- # Validate shapes.
- if self._gradient_shapes is None:
- self._gradient_shapes = [var.shape.as_list()
- for var in trainable_vars]
- assert len(trainable_vars) == len(self._gradient_shapes)
- assert all(var.shape.as_list() == var_shape for var,
- var_shape in zip(trainable_vars, self._gradient_shapes))
-
- # Report memory usage if requested.
- deps = []
- if self._report_mem_usage:
- self._report_mem_usage = False
- try:
- with tf.name_scope(self.id + '_mem'), tf.device(device.name), tf.control_dependencies([loss]):
- deps.append(autosummary.autosummary(
- self.id + "/mem_usage_gb", tf.contrib.memory_stats.BytesInUse() / 2**30))
- except tf.errors.NotFoundError:
- pass
-
- # Compute gradients.
- with tf.name_scope(self.id + "_grad"), tf.device(device.name), tf.control_dependencies(deps):
- loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
- gate = tf.train.Optimizer.GATE_NONE # disable gating to reduce memory usage
- grad_list = device.optimizer.compute_gradients(
- loss=loss, var_list=trainable_vars, gate_gradients=gate)
-
- # Register gradients.
- for grad, var in grad_list:
- if var not in device.grad_raw:
- device.grad_raw[var] = []
- device.grad_raw[var].append(grad)
-
- def apply_updates(self, allow_no_op: bool = False) -> tf.Operation:
- """Construct training op to update the registered variables based on their gradients."""
- tfutil.assert_tf_initialized()
- assert not self._updates_applied
- self._updates_applied = True
- all_ops = []
-
- # Check for no-op.
- if allow_no_op and len(self._devices) == 0:
- with tfutil.absolute_name_scope(self.scope):
- return tf.no_op(name='TrainingOp')
-
- # Clean up gradients.
- for device_idx, device in enumerate(self._devices.values()):
- with tfutil.absolute_name_scope(self.scope + "/Clean%d" % device_idx), tf.device(device.name):
- for var, grad in device.grad_raw.items():
-
- # Filter out disconnected gradients and convert to float32.
- grad = [g for g in grad if g is not None]
- grad = [tf.cast(g, tf.float32) for g in grad]
-
- # Sum within the device.
- if len(grad) == 0:
- grad = tf.zeros(var.shape) # No gradients => zero.
- elif len(grad) == 1:
- # Single gradient => use as is.
- grad = grad[0]
- else:
- # Multiple gradients => sum.
- grad = tf.add_n(grad)
-
- # Scale as needed.
- scale = 1.0 / \
- len(device.grad_raw[var]) / len(self._devices)
- scale = tf.constant(scale, dtype=tf.float32, name="scale")
- if self.minibatch_multiplier is not None:
- scale /= tf.cast(self.minibatch_multiplier, tf.float32)
- scale = self.undo_loss_scaling(scale)
- device.grad_clean[var] = grad * scale
-
- # Sum gradients across devices.
- if len(self._devices) > 1:
- with tfutil.absolute_name_scope(self.scope + "/Broadcast"), tf.device(None):
- for all_vars in zip(*[device.grad_clean.keys() for device in self._devices.values()]):
- # NCCL does not support zero-sized tensors.
- if len(all_vars) > 0 and all(dim > 0 for dim in all_vars[0].shape.as_list()):
- all_grads = [device.grad_clean[var] for device, var in zip(
- self._devices.values(), all_vars)]
- all_grads = nccl_ops.all_sum(all_grads)
- for device, var, grad in zip(self._devices.values(), all_vars, all_grads):
- device.grad_clean[var] = grad
-
- # Apply updates separately on each device.
- for device_idx, device in enumerate(self._devices.values()):
- with tfutil.absolute_name_scope(self.scope + "/Apply%d" % device_idx), tf.device(device.name):
- # pylint: disable=cell-var-from-loop
-
- # Accumulate gradients over time.
- if self.minibatch_multiplier is None:
- acc_ok = tf.constant(True, name='acc_ok')
- device.grad_acc = OrderedDict(device.grad_clean)
- else:
- # Create variables.
- with tf.control_dependencies(None):
- for var in device.grad_clean.keys():
- device.grad_acc_vars[var] = tf.Variable(
- tf.zeros(var.shape), trainable=False, name="grad_acc_var")
- device.grad_acc_count = tf.Variable(
- tf.zeros([]), trainable=False, name="grad_acc_count")
-
- # Track counter.
- count_cur = device.grad_acc_count + 1.0
- def count_inc_op(): return tf.assign(device.grad_acc_count, count_cur)
- def count_reset_op(): return tf.assign(device.grad_acc_count, tf.zeros([]))
- acc_ok = (count_cur >= tf.cast(
- self.minibatch_multiplier, tf.float32))
- all_ops.append(
- tf.cond(acc_ok, count_reset_op, count_inc_op))
-
- # Track gradients.
- for var, grad in device.grad_clean.items():
- acc_var = device.grad_acc_vars[var]
- acc_cur = acc_var + grad
- device.grad_acc[var] = acc_cur
- with tf.control_dependencies([acc_cur]):
- def acc_inc_op(): return tf.assign(acc_var, acc_cur)
- def acc_reset_op(): return tf.assign(acc_var, tf.zeros(var.shape))
- all_ops.append(
- tf.cond(acc_ok, acc_reset_op, acc_inc_op))
-
- # No overflow => apply gradients.
- all_ok = tf.reduce_all(tf.stack(
- [acc_ok] + [tf.reduce_all(tf.is_finite(g)) for g in device.grad_acc.values()]))
-
- def apply_op(): return device.optimizer.apply_gradients(
- [(tf.cast(grad, var.dtype), var) for var, grad in device.grad_acc.items()])
- all_ops.append(tf.cond(all_ok, apply_op, tf.no_op))
-
- # Adjust loss scaling.
- if self.use_loss_scaling:
- def ls_inc_op(): return tf.assign_add(
- device.loss_scaling_var, self.loss_scaling_inc)
- def ls_dec_op(): return tf.assign_sub(
- device.loss_scaling_var, self.loss_scaling_dec)
-
- def ls_update_op(): return tf.group(tf.cond(all_ok, ls_inc_op, ls_dec_op))
- all_ops.append(tf.cond(acc_ok, ls_update_op, tf.no_op))
-
- # Last device => report statistics.
- if device_idx == len(self._devices) - 1:
- all_ops.append(autosummary.autosummary(
- self.id + "/learning_rate", self.learning_rate))
- all_ops.append(autosummary.autosummary(
- self.id + "/overflow_frequency", tf.where(all_ok, 0, 1), condition=acc_ok))
- if self.use_loss_scaling:
- all_ops.append(autosummary.autosummary(
- self.id + "/loss_scaling_log2", device.loss_scaling_var))
-
- # Initialize variables.
- self.reset_optimizer_state()
- if self.use_loss_scaling:
- tfutil.init_uninitialized_vars(
- [device.loss_scaling_var for device in self._devices.values()])
- if self.minibatch_multiplier is not None:
- tfutil.run([var.initializer for device in self._devices.values() for var in list(
- device.grad_acc_vars.values()) + [device.grad_acc_count]])
-
- # Group everything into a single op.
- with tfutil.absolute_name_scope(self.scope):
- return tf.group(*all_ops, name="TrainingOp")
-
- def reset_optimizer_state(self) -> None:
- """Reset internal state of the underlying optimizer."""
- tfutil.assert_tf_initialized()
- tfutil.run([var.initializer for device in self._devices.values()
- for var in device.optimizer.variables()])
-
- def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
- """Get or create variable representing log2 of the current dynamic loss scaling factor."""
- return self._get_device(device).loss_scaling_var
-
- def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
- """Apply dynamic loss scaling for the given expression."""
- assert tfutil.is_tf_expression(value)
- if not self.use_loss_scaling:
- return value
- return value * tfutil.exp2(self.get_loss_scaling_var(value.device))
-
- def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
- """Undo the effect of dynamic loss scaling for the given expression."""
- assert tfutil.is_tf_expression(value)
- if not self.use_loss_scaling:
- return value
- return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type
-
-
-class SimpleAdam:
- """Simplified version of tf.train.AdamOptimizer that behaves identically when used with dnnlib.tflib.Optimizer."""
-
- def __init__(self, name="Adam", learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
- self.name = name
- self.learning_rate = learning_rate
- self.beta1 = beta1
- self.beta2 = beta2
- self.epsilon = epsilon
- self.all_state_vars = []
-
- def variables(self):
- return self.all_state_vars
-
- def compute_gradients(self, loss, var_list, gate_gradients=tf.train.Optimizer.GATE_NONE):
- assert gate_gradients == tf.train.Optimizer.GATE_NONE
- return list(zip(tf.gradients(loss, var_list), var_list))
-
- def apply_gradients(self, grads_and_vars):
- with tf.name_scope(self.name):
- state_vars = []
- update_ops = []
-
- # Adjust learning rate to deal with startup bias.
- with tf.control_dependencies(None):
- b1pow_var = tf.Variable(
- dtype=tf.float32, initial_value=1, trainable=False)
- b2pow_var = tf.Variable(
- dtype=tf.float32, initial_value=1, trainable=False)
- state_vars += [b1pow_var, b2pow_var]
- b1pow_new = b1pow_var * self.beta1
- b2pow_new = b2pow_var * self.beta2
- update_ops += [tf.assign(b1pow_var, b1pow_new),
- tf.assign(b2pow_var, b2pow_new)]
- lr_new = self.learning_rate * \
- tf.sqrt(1 - b2pow_new) / (1 - b1pow_new)
-
- # Construct ops to update each variable.
- for grad, var in grads_and_vars:
- with tf.control_dependencies(None):
- m_var = tf.Variable(
- dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False)
- v_var = tf.Variable(
- dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False)
- state_vars += [m_var, v_var]
- m_new = self.beta1 * m_var + (1 - self.beta1) * grad
- v_new = self.beta2 * v_var + (1 - self.beta2) * tf.square(grad)
- var_delta = lr_new * m_new / (tf.sqrt(v_new) + self.epsilon)
- update_ops += [tf.assign(m_var, m_new), tf.assign(v_var,
- v_new), tf.assign_sub(var, var_delta)]
-
- # Group everything together.
- self.all_state_vars += state_vars
- return tf.group(*update_ops)
diff --git a/spaces/gyugnsu/DragGan-Inversion/torch_utils/pti.py b/spaces/gyugnsu/DragGan-Inversion/torch_utils/pti.py
deleted file mode 100644
index 23be1ee0a101a1c6ce14db7f66673946a0f7d612..0000000000000000000000000000000000000000
--- a/spaces/gyugnsu/DragGan-Inversion/torch_utils/pti.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import pickle
-from PTI.utils.ImagesDataset import ImagesDataset, Image2Dataset
-import torch
-from PTI.utils.models_utils import load_old_G
-from PTI.utils.alignment import align_face
-
-from PTI.training.coaches.single_id_coach import SingleIDCoach
-from PTI.configs import global_config, paths_config
-import dlib
-
-import os
-from torchvision.transforms import transforms
-from torch.utils.data import DataLoader
-from string import ascii_uppercase
-import sys
-from pathlib import Path
-
-sys.path.append(".")
-# sys.path.append('PTI/')
-# sys.path.append('PTI/training/')
-
-
-def run_PTI(img, run_name):
- # os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
- # os.environ['CUDA_VISIBLE_DEVICES'] = global_config.cuda_visible_devices
-
- global_config.run_name = run_name
-
- global_config.pivotal_training_steps = 1
- global_config.training_step = 1
-
- embedding_dir_path = f"{paths_config.embedding_base_dir}/{paths_config.input_data_id}/{paths_config.pti_results_keyword}"
- os.makedirs(embedding_dir_path, exist_ok=True)
-
- # dataset = ImagesDataset(paths_config.input_data_path, transforms.Compose([
- # transforms.ToTensor(),
- # transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]))
-
- G = load_old_G()
- IMAGE_SIZE = 1024
- predictor = dlib.shape_predictor(paths_config.dlib)
- aligned_image = align_face(img, predictor=predictor, output_size=IMAGE_SIZE)
- img = aligned_image.resize([G.img_resolution, G.img_resolution])
- dataset = Image2Dataset(img)
-
- dataloader = DataLoader(dataset, batch_size=1, shuffle=False)
-
- coach = SingleIDCoach(dataloader, use_wandb=False)
-
- new_G, w_pivot = coach.train()
- return new_G, w_pivot
-
-
-def export_updated_pickle(new_G, out_path, run_name):
- image_name = "customIMG"
-
- with open(paths_config.stylegan2_ada_ffhq, "rb") as f:
- old_G = pickle.load(f)["G_ema"].cuda()
-
- embedding = Path(f"{paths_config.checkpoints_dir}/model_{run_name}_{image_name}.pt")
- with open(embedding, "rb") as f_new:
- new_G = torch.load(f_new).cuda()
-
- print("Exporting large updated pickle based off new generator and ffhq.pkl")
- with open(paths_config.stylegan2_ada_ffhq, "rb") as f:
- d = pickle.load(f)
- old_G = d["G_ema"].cuda() # tensor
- old_D = d["D"].eval().requires_grad_(False).cpu()
-
- tmp = {}
- tmp["G"] = old_G.eval().requires_grad_(False).cpu()
- tmp["G_ema"] = new_G.eval().requires_grad_(False).cpu()
- tmp["D"] = old_D
- tmp["training_set_kwargs"] = None
- tmp["augment_pipe"] = None
-
- with open(out_path, "wb") as f:
- pickle.dump(tmp, f)
- # delete
-
- embedding.unlink()
-
-
-# if __name__ == '__main__':
-# from PIL import Image
-# img = Image.open('PTI/test/test.jpg')
-# new_G, w_pivot = run_PTI(img, use_wandb=False, use_multi_id_training=False)
-# out_path = f'checkpoints/stylegan2_custom_512_pytorch.pkl'
-# export_updated_pickle(new_G, out_path)
diff --git a/spaces/h2oai/wave-tour/examples/db.py b/spaces/h2oai/wave-tour/examples/db.py
deleted file mode 100644
index f952754bd02499b7b06144c85322f0d01f6bdc0f..0000000000000000000000000000000000000000
--- a/spaces/h2oai/wave-tour/examples/db.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# WaveDB
-# How to use WaveDB, a simple sqlite3 database server that ships with Wave.
-# ---
-
-# Before you run this example, start WaveDB (`wavedb`).
-# By default, WaveDB listens on port 10100.
-#
-# To run this example, execute `python db.py`
-#
-# If your WaveDB instance is configured differently, you might want to set
-# the following environment variables accordingly:
-# H2O_WAVEDB_ADDRESS - the ip:port of the database server
-# H2O_WAVEDB_ACCESS_KEY_ID - the API access key ID
-# H2O_WAVEDB_ACCESS_KEY_SECRET - the API access key secret
-
-import asyncio
-from h2o_wave import connect
-
-
-async def main():
- # Create a database connection
- connection = connect()
-
- # Access the 'employees' database.
- # A new database is created automatically if it does not exist.
- db = connection["employees"]
-
- # Execute some statements.
- await db.exec("drop table if exists employee")
- await db.exec("create table employee(empid integer, name text, title text)")
-
- # Execute a statement and handle errors.
- results, err = await db.exec("insert into employee values(?, ?, ?)", 101, 'Jeffrey Lebowski', 'Slacker')
- if err:
- raise ValueError(err)
-
- # Execute many statements.
- insert_employee = "insert into employee values(?, ?, ?)"
- await db.exec_many(
- (insert_employee, 102, 'Walter Sobchak', 'Veteran'),
- (insert_employee, 103, 'Donny Kerabatsos', 'Sidekick'),
- (insert_employee, 104, 'Jesus Quintana', 'Bowler'),
- (insert_employee, 105, 'Uli Kunkel', 'Nihilist'),
- )
-
- # Execute many statements as a transaction.
- await db.exec_atomic(
- (insert_employee, 106, 'Brandt', 'Butler'),
- (insert_employee, 107, 'Maude Lebowski', 'Artist'),
- (insert_employee, 108, 'Franz', 'Nihilist'),
- (insert_employee, 109, 'Kieffer', 'Nihilist'),
- )
-
- # Read records.
- rows, err = await db.exec("select * from employee")
- if err:
- raise ValueError(err)
-
- print(rows)
-
- # Prints:
- # [
- # [101, 'Jeffrey Lebowski', 'Slacker'],
- # [102, 'Walter Sobchak', 'Veteran'],
- # [103, 'Donny Kerabatsos', 'Sidekick'],
- # [104, 'Jesus Quintana', 'Bowler'],
- # [105, 'Uli Kunkel', 'Nihilist'],
- # [106, 'Brandt', 'Butler'],
- # [107, 'Maude Lebowski', 'Artist'],
- # [108, 'Franz', 'Nihilist'],
- # [109, 'Kieffer', 'Nihilist']
- # ]
-
- # Clean up.
- await db.exec("drop table employee")
-
- # Drop the database entirely. Warning: A database is irrecoverable once dropped.
- await db.drop()
-
- # Close connection.
- await connection.close()
-
-
-loop = asyncio.new_event_loop()
-asyncio.set_event_loop(loop)
-loop.run_until_complete(main())
diff --git a/spaces/h2oai/wave-tour/examples/ml_h2o.py b/spaces/h2oai/wave-tour/examples/ml_h2o.py
deleted file mode 100644
index 0ff83209e104c91ff9d7a6126bc1a46071f0c706..0000000000000000000000000000000000000000
--- a/spaces/h2oai/wave-tour/examples/ml_h2o.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# WaveML / H2O-3
-# Build Wave Models for training and prediction of classification or regression using H2O-3 AutoML.
-# ---
-from h2o_wave import main, app, Q, ui
-from h2o_wave_ml import build_model, ModelType
-
-from sklearn.datasets import load_wine
-from sklearn.model_selection import train_test_split
-
-
-@app('/demo')
-async def serve(q: Q):
- if q.args.train:
- # train WaveML Model using H2O-3 AutoML
- q.client.wave_model = build_model(
- train_df=q.client.train_df,
- target_column='target',
- model_type=ModelType.H2O3,
- _h2o3_max_runtime_secs=5,
- _h2o3_nfolds=2
- )
- model_id = q.client.wave_model.model.model_id
- accuracy = round(100 - q.client.wave_model.model.mean_per_class_error() * 100, 2)
-
- # show training details and prediction option
- q.page['example'].predict.disabled = False
- q.page['example'].message.type = 'success'
- q.page['example'].message.text = 'Training successfully completed!'
- q.page['example'].model_id.content = f'''**H2O AutoML model id:** {model_id}
- **Accuracy:** {accuracy}%'''
- q.page['example'].example_predictions.content = ''
- elif q.args.predict:
- # predict on test data
- preds = q.client.wave_model.predict(test_df=q.client.test_df)
-
- # show predictions
- q.page['example'].message.text = 'Prediction successfully completed!'
- q.page['example'].example_predictions.content = f'''**Example predictions:**
- {preds[0]} {preds[1]} {preds[2]}'''
- else:
- # prepare sample train and test dataframes
- data = load_wine(as_frame=True)['frame']
- q.client.train_df, q.client.test_df = train_test_split(data, train_size=0.8)
-
- # display ui
- q.page['example'] = ui.form_card(
- box='1 1 -1 -1',
- items=[
- ui.text(content='''The sample dataset used is the
- wine dataset.'''),
- ui.buttons(items=[
- ui.button(name='train', label='Train', primary=True),
- ui.button(name='predict', label='Predict', primary=True, disabled=True),
- ]),
- ui.message_bar(name='message', type='warning', text='Training will take a few seconds'),
- ui.text(name='model_id', content=''),
- ui.text(name='example_predictions', content='')
- ]
- )
-
- await q.page.save()
diff --git a/spaces/h2oai/wave-tour/examples/plot_path_point.py b/spaces/h2oai/wave-tour/examples/plot_path_point.py
deleted file mode 100644
index 4c388d752d088b1f13fbdd4b0689b6ce665b63ca..0000000000000000000000000000000000000000
--- a/spaces/h2oai/wave-tour/examples/plot_path_point.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Plot / Path / Point
-# Make a path #plot with an additional layer of points.
-# ---
-from h2o_wave import site, data, ui
-
-page = site['/demo']
-
-page.add('example', ui.plot_card(
- box='1 1 4 5',
- title='Path + Point',
- data=data('price performance', 10, rows=[
- (0.1, 0.6),
- (0.2, 0.5),
- (0.3, 0.3),
- (0.4, 0.2),
- (0.4, 0.5),
- (0.2, 0.2),
- (0.8, 0.5),
- (0.3, 0.3),
- (0.2, 0.4),
- (0.1, 0.0),
- ]),
- plot=ui.plot([
- ui.mark(type='path', x='=price', y='=performance'),
- ui.mark(type='point', x='=price', y='=performance'),
- ])
-))
-
-page.save()
diff --git a/spaces/haakohu/deep_privacy2_face/dp2/data/transforms/functional.py b/spaces/haakohu/deep_privacy2_face/dp2/data/transforms/functional.py
deleted file mode 100644
index 1ee57f27ad07e597098ce1de967c3a50a1d06d0a..0000000000000000000000000000000000000000
--- a/spaces/haakohu/deep_privacy2_face/dp2/data/transforms/functional.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import torchvision.transforms.functional as F
-import torch
-import pickle
-from tops import download_file, assert_shape
-from typing import Dict
-from functools import lru_cache
-
-global symmetry_transform
-
-
-@lru_cache(maxsize=1)
-def get_symmetry_transform(symmetry_url):
- file_name = download_file(symmetry_url)
- with open(file_name, "rb") as fp:
- symmetry = pickle.load(fp)
- return torch.from_numpy(symmetry["vertex_transforms"]).long()
-
-
-hflip_handled_cases = set([
- "keypoints", "img", "mask", "border", "semantic_mask", "vertices", "E_mask", "embed_map", "condition",
- "embedding", "vertx2cat", "maskrcnn_mask", "__key__"])
-
-
-def hflip(container: Dict[str, torch.Tensor], flip_map=None) -> Dict[str, torch.Tensor]:
- container["img"] = F.hflip(container["img"])
- if "condition" in container:
- container["condition"] = F.hflip(container["condition"])
- if "embedding" in container:
- container["embedding"] = F.hflip(container["embedding"])
- assert all([key in hflip_handled_cases for key in container]), container.keys()
- if "keypoints" in container:
- assert flip_map is not None
- if container["keypoints"].ndim == 3:
- keypoints = container["keypoints"][:, flip_map, :]
- keypoints[:, :, 0] = 1 - keypoints[:, :, 0]
- else:
- assert_shape(container["keypoints"], (None, 3))
- keypoints = container["keypoints"][flip_map, :]
- keypoints[:, 0] = 1 - keypoints[:, 0]
- container["keypoints"] = keypoints
- if "mask" in container:
- container["mask"] = F.hflip(container["mask"])
- if "border" in container:
- container["border"] = F.hflip(container["border"])
- if "semantic_mask" in container:
- container["semantic_mask"] = F.hflip(container["semantic_mask"])
- if "vertices" in container:
- symmetry_transform = get_symmetry_transform(
- "https://dl.fbaipublicfiles.com/densepose/meshes/symmetry/symmetry_smpl_27554.pkl")
- container["vertices"] = F.hflip(container["vertices"])
- symmetry_transform_ = symmetry_transform.to(container["vertices"].device)
- container["vertices"] = symmetry_transform_[container["vertices"].long()]
- if "E_mask" in container:
- container["E_mask"] = F.hflip(container["E_mask"])
- if "maskrcnn_mask" in container:
- container["maskrcnn_mask"] = F.hflip(container["maskrcnn_mask"])
- return container
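A small usage sketch for `hflip` above, with a made-up two-keypoint container; the `flip_map` simply swaps a hypothetical left/right keypoint pair, and the import path follows the file location shown in this diff:

```python
import torch
from dp2.data.transforms.functional import hflip  # path per the file above

container = {
    "img": torch.rand(3, 4, 4),     # (C, H, W), values in [0, 1]
    "mask": torch.ones(1, 4, 4),
    # Normalized (x, y, score) keypoints: index 0 plays "left", index 1 plays "right".
    "keypoints": torch.tensor([[0.2, 0.5, 1.0],
                               [0.8, 0.5, 1.0]]),
}
flipped = hflip(container, flip_map=[1, 0])  # swap the left/right indices while flipping
print(flipped["keypoints"])  # x becomes 1 - x and the two rows are exchanged
```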
diff --git a/spaces/hands012/gpt-academic/docs/README_RS.md b/spaces/hands012/gpt-academic/docs/README_RS.md
deleted file mode 100644
index 5ba5fcccc30db520d38e21950e2f7cfc03d324c5..0000000000000000000000000000000000000000
--- a/spaces/hands012/gpt-academic/docs/README_RS.md
+++ /dev/null
@@ -1,278 +0,0 @@
-> **Note**
->
-> This file is automatically generated by the markdown translation module of this project and may not be 100% correct.
->
-# GPT Academic Optimization (GPT Academic)
-
-**If you like this project, please give it a star. If you have come up with more useful language shortcuts or function plugins, feel free to open an issue or a pull request.
-To translate this project into any language with GPT, read and run [`multi_language.py`](multi_language.py) (experimental).**
-
-> **Note**
->
-> 1. Please note that only function plugins (buttons) marked in **red** support reading files, and some plugins are located in the **dropdown menu** of the plugin area. In addition, we welcome and handle pull requests for any new plugins with the highest priority!
->
-> 2. The functionality of each file in this project is described in detail in the self-analysis report [`self_analysis.md`](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academic%E9%A1%B9%E7%9B%AE%E8%87%AA%E8%AF%91%E8%A7%A3%E6%8A%A5%E5%91%8A). As the project iterates, you can regenerate this self-analysis report at any time by clicking the corresponding function plugin and calling GPT. Setup questions are covered in the [`wiki`](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98). [Installation instructions](#installation).
->
-> 3. This project is compatible with and encourages the use of Chinese language models such as ChatGLM, RWKV, Pangu, etc. Multiple api-keys can coexist and can be specified in the configuration file, e.g. `API_KEY="openai-key1,openai-key2,api2d-key3"`. To temporarily change the `API_KEY`, enter a temporary `API_KEY` in the input area and press Enter for it to take effect.
-
-> **Note**
->
-> When installing dependencies, strictly select the versions **specified in requirements.txt**.
->
-> `pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/`
-
-Feature | Description
---- | ---
-One-click polishing | Supports one-click polishing and one-click search for grammatical errors in academic papers
-One-click Chinese-English translation | One-click Chinese-English translation
-One-click code explanation | Display, explain, generate, and comment code
-[Custom shortcut keys](https://www.bilibili.com/video/BV14s4y1E7jN) | Supports custom shortcut keys
-Modular design | Supports powerful custom [function plugins](https://github.com/binary-husky/chatgpt_academic/tree/master/crazy_functions); plugins support [hot reloading](https://github.com/binary-husky/chatgpt_academic/wiki/Function-Plug-in-Guide)
-[Self-analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] [One-click review](https://github.com/binary-husky/chatgpt_academic/wiki/chatgpt-academicProject-Self-analysis-Report) of this project's own source code
-[Project analysis](https://www.bilibili.com/video/BV1cj411A7VW) | [Function plugin] One-click analysis of the project tree of other Python/C/C++/Java/Lua/... projects
-Paper reading, paper [translation](https://www.bilibili.com/video/BV1KT411x7Wn) | [Function plugin] One-click reading of the full text of an academic paper and generation of a summary
-Full [LaTeX](https://www.bilibili.com/video/BV1nk4y1Y7Js/) translation and polishing | [Function plugin] One-click translation or polishing of a LaTeX paper
-Automatic comment generation | [Function plugin] One-click automatic generation of function comments
-Markdown [Chinese-English translation](https://www.bilibili.com/video/BV1yo4y157jV/) | [Function plugin] Have you seen the [README](https://github.com/binary-husky/chatgpt_academic/blob/master/docs/README_EN.md) in these 5 languages?
-Chat analysis report | [Function plugin] Automatically generates a summary report after a run
-[PDF paper](https://www.bilibili.com/video/BV1KT411x7Wn) full-text translation | [Function plugin] Extracts the title and abstract of a [PDF paper](https://www.bilibili.com/video/BV1KT411x7Wn) and translates the whole document (multi-threaded)
-[Arxiv helper](https://www.bilibili.com/video/BV1LM4y1279X) | [Function plugin] Enter the URL of an arxiv paper to translate its abstract and download the PDF with one click
-[Google Scholar integration helper](https://www.bilibili.com/video/BV19L411U7ia) | [Function plugin] Given any Google Scholar search page URL, let GPT help you [write a related-work overview](https://www.bilibili.com/video/BV1GP411U7Az/)
-Internet information aggregation + GPT | [Function plugin] One-click [ask GPT to fetch information from the Internet](https://www.bilibili.com/video/BV1om4y127ck) before answering, so the information never goes stale
-Formula / image / table display | Can show formulas in both [TeX source and rendered form](https://user-images.githubusercontent.com/96192199/230598842-1d7fcddd-815d-40ee-af60-baf488a199df.png), supports formulas and code highlighting
-Multi-threaded function plugins | Supports multi-threaded calls to chatgpt, one-click processing of [large volumes of text](https://www.bilibili.com/video/BV1FT411H7c5/) or programs
-Dark gradio theme | Add ```/?__theme=dark``` to the end of the browser URL to switch to the dark theme
-[Multiple LLM models](https://www.bilibili.com/video/BV1wT411p7yf) supported, [API2D](https://api2d.com/) interface | Served simultaneously by GPT3.5, GPT4, [Tsinghua ChatGLM](https://github.com/THUDM/ChatGLM-6B), and [Fudan MOSS](https://github.com/OpenLMLab/MOSS)
-Access to more new LLM models, support for [huggingface](https://huggingface.co/spaces/qingxu98/gpt-academic) deployment | Newbing interface (new Bing), support for [LLaMA](https://github.com/facebookresearch/llama), [RWKV](https://github.com/BlinkDL/ChatRWKV), and [Pangu α](https://openi.org.cn/pangu/)
-More new features (image generation, etc.) | See the end of this document…
-
-- All buttons are dynamically generated by reading functional.py, and custom functions can be freely added to liberate the clipboard
-
-
-
-- Revision/Correction
-
-
-
-
-- If the output contains formulas, they will be displayed in both tex and rendered form for easy copying and reading
-
-
-
-
-- Don't feel like looking at project code? Show the entire project directly in chatgpt
-
-
-
-
-- Mixing multiple large language models (ChatGLM + OpenAI-GPT3.5 + [API2D](https://api2d.com/)-GPT4)
-
-
-
-
----
-# Installation
-## Installation-Method 1: Run directly (Windows, Linux or MacOS)
-
-1. Download the project
-```sh
-git clone https://github.com/binary-husky/chatgpt_academic.git
-cd chatgpt_academic
-```
-
-2. Configure API_KEY
-
-In `config.py`, configure API KEY and other settings, [special network environment settings](https://github.com/binary-husky/gpt_academic/issues/1).
-
-(P.S. When the program runs, it first checks whether a private configuration file named `config_private.py` exists and uses its values to override the entries of the same name in `config.py`. If you understand this reading logic, we strongly recommend creating a `config_private.py` next to `config.py` and moving (copying) your settings from `config.py` into it. `config_private.py` is not tracked by git, which keeps your private information safer. The project also supports configuring most options through `environment variables`; the format of the environment variables follows the `docker-compose` file. Read priority: `environment variable` > `config_private.py` > `config.py`. A minimal sketch of this priority order follows.)
-
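-A minimal sketch of that read-priority order (a hypothetical helper for illustration only, not the project's actual loader):
-
-```python
-import os
-
-def read_option(name, default=None):
-    # 1) an environment variable always wins
-    if name in os.environ:
-        return os.environ[name]
-    # 2) then config_private.py, if it exists
-    try:
-        import config_private
-        if hasattr(config_private, name):
-            return getattr(config_private, name)
-    except ImportError:
-        pass
-    # 3) finally fall back to config.py
-    import config
-    return getattr(config, name, default)
-```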
-
-3. Install dependencies
-```sh
-# (Option I: if familiar with Python) (Python 3.9 or above; the newer the better). Note: use the official pip source or the aliyun pip source; to switch the source temporarily: python -m pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/
-python -m pip install -r requirements.txt
-
-# (Option II: if unfamiliar with Python) Use Anaconda; the steps are similar (video tutorial: https://www.bilibili.com/video/BV1rc411W7Dr):
-conda create -n gptac_venv python=3.11 # create an Anaconda environment
-conda activate gptac_venv # activate Anaconda environment
-python -m pip install -r requirements.txt # This step is the same as the pip installation
-```
-
-
-
-[Optional step] If you need Tsinghua ChatGLM / Fudan MOSS as the backend, you need to install additional dependencies (prerequisites: familiar with Python, have used PyTorch, and have a reasonably powerful machine):
-```sh
-# [Optional step I] Support Tsinghua ChatGLM. Note: if you encounter the "Call ChatGLM fail, cannot load ChatGLM parameters normally" error, refer to the following: 1. The default installation above is the torch+cpu version; to use cuda, uninstall torch and reinstall torch+cuda; 2. If the model cannot be loaded because your machine is not powerful enough, you can lower the model precision in request_llm/bridge_chatglm.py by changing AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) to AutoTokenizer.from_pretrained("THUDM/chatglm-6b-int4", trust_remote_code=True)
-python -m pip install -r request_llm/requirements_chatglm.txt
-
-# [Optional step II] Support Fudan MOSS
-python -m pip install -r request_llm/requirements_moss.txt
-git clone https://github.com/OpenLMLab/MOSS.git request_llm/moss # Note that when executing this line of code, you must be in the project root path
-
-# [Optional step III] Make sure the AVAIL_LLM_MODELS in the config.py configuration file contains the expected models. Currently, all supported models are as follows (the jittorllms series currently only supports the docker solution):
-AVAIL_LLM_MODELS = ["gpt-3.5-turbo", "api2d-gpt-3.5-turbo", "gpt-4", "api2d-gpt-4", "chatglm", "newbing", "moss"] # + ["jittorllms_rwkv", "jittorllms_pangualpha", "jittorllms_llama"]
-```
-
-
-
-
-
-
-4. Run
-```sh
-python main.py
-```
-
-5. Test the function plugins
-```
-- Testing function plugin template function (requires GPT to answer what happened in history today), you can use this function as a template to implement more complex functions
- Click "[Function plugin Template Demo] On this day in history"
-```
-
-## Installation - Method 2: Using Docker
-
-1. ChatGPT only (recommended for most people)
-
-``` sh
-git clone https://github.com/binary-husky/chatgpt_academic.git # download the project
-cd chatgpt_academic # enter the path
-nano config.py # edit config.py with any text editor to configure "Proxy", "API_KEY", and "WEB_PORT" (eg 50923)
-docker build -t gpt-academic . # install
-
-# (Last step-Option 1) In a Linux environment, using `--net=host` is more convenient and faster
-docker run --rm -it --net=host gpt-academic
-# (Last step-Option 2) In macOS/windows environment, only -p option can be used to expose the port on the container (eg 50923) to the port on the host
-docker run --rm -it -e WEB_PORT=50923 -p 50923:50923 gpt-academic
-```
-
-2. ChatGPT + ChatGLM + MOSS (requires familiarity with Docker)
-
-``` sh
-# Edit docker-compose.yml, delete solutions 1 and 3, and keep solution 2. Modify the configuration of solution 2 in docker-compose.yml, refer to the comments in it
-docker-compose up
-```
-
-3. ChatGPT + LLAMA + PanGu + RWKV (requires familiarity with Docker)
-``` sh
-# Edit docker-compose.yml, delete solutions 1 and 2, and keep solution 3. Modify the configuration of solution 3 in docker-compose.yml, refer to the comments in it
-docker-compose up
-```
-
-
-## Installation Method 3: Other Deployment Methods
-
-1. How to use reverse proxy URL/Microsoft Azure API
-Configure API_URL_REDIRECT according to the instructions in `config.py`.
-
-2. Remote Cloud Server Deployment (Requires Knowledge and Experience of Cloud Servers)
-Please visit [Deployment Wiki-1](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BA%91%E6%9C%8D%E5%8A%A1%E5%99%A8%E8%BF%9C%E7%A8%8B%E9%83%A8%E7%BD%B2%E6%8C%87%E5%8D%97)
-
-3. Using WSL2 (Windows Subsystem for Linux)
-Please visit [Deployment Wiki-2](https://github.com/binary-husky/chatgpt_academic/wiki/%E4%BD%BF%E7%94%A8WSL2%EF%BC%88Windows-Subsystem-for-Linux-%E5%AD%90%E7%B3%BB%E7%BB%9F%EF%BC%89%E9%83%A8%E7%BD%B2)
-
-4. How to run at the secondary URL (such as `http://localhost/subpath`)
-Please visit [FastAPI Operation Instructions](docs/WithFastapi.md)
-
-5. Using docker-compose to run
-Please read docker-compose.yml and follow the prompts to operate.
-
----
-# Advanced Usage
-## Customize new convenient buttons / custom function plugins
-
-1. Customize new convenient buttons (academic shortcuts)
-Open `core_functional.py` with any text editor, add an entry as follows, and then restart the program. (If the button has been added successfully and is visible, both prefixes and suffixes can be hot-modified without having to restart the program.)
-For example:
-```
-"Super English to Chinese": {
- # Prefix, will be added before your input. For example, describe your requirements, such as translation, code interpretation, polishing, etc.
- "Prefix": "Please translate the following content into Chinese, and then explain each proper noun that appears in the text with a markdown table:\n\n",
-
- # Suffix, will be added after your input. For example, with the prefix, you can enclose your input content in quotes.
- "Suffix": "",
-},
-```
-
-
-
-
-2. Custom function plugin
-
-Write powerful function plugins to perform any task you can or cannot imagine.
-Writing and debugging plugins in this project is easy: as long as you know some Python, you can implement your own plugin by imitating the template we provide. A heavily simplified sketch follows.
-Please refer to the [Function Plugin Guide](https://github.com/binary-husky/chatgpt_academic/wiki/%E5%87%BD%E6%95%B0%E6%8F%92%E4%BB%B6%E6%8C%87%E5%8D%97) for details.
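-
-The sketch below only illustrates the idea; the names and the signature are made up for this example, so check the guide above for the project's actual plugin interface:
-
-```python
-def call_llm(prompt):
-    # stand-in for the project's real LLM call
-    return "(model reply for: " + prompt[:40] + "...)"
-
-def summarize_plugin(user_text, chatbot, history):
-    # hypothetical plugin body: ask the LLM to summarize whatever the user typed
-    reply = call_llm("Summarize the following text:\n\n" + user_text)
-    chatbot.append((user_text, reply))    # show the exchange in the UI
-    history.extend([user_text, reply])    # keep it in the conversation history
-```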
-
----
-# Latest Update
-## Recent feature updates
-
-1. Conversation saving. Call "Save the current conversation" in the function plugin area to save the current conversation as a readable and restorable HTML file. You can also call "Load conversation history archive" in the function plugin area (drop-down menu) to restore a previous session. Tip: clicking "Load conversation history archive" without specifying a file lets you browse the cached HTML archives, and clicking "Delete all local conversation history" removes all cached HTML files.
-
-2. Report generation. Most plugins produce a work report after they finish running.
-
-3. Modular function design: simple interfaces that nevertheless support powerful functionality.
-
-4. This is an open-source project that can "translate itself".
-
-5. Translating other open-source projects is not a problem.
-
-6. Small decorative [live2d](https://github.com/fghrsh/live2d_demo) feature (disabled by default; requires changing `config.py`).
-
-7. MOSS large language model support.
-
-8. OpenAI image generation.
-
-9. OpenAI audio parsing and summarization.
-
-10. Full-cycle LaTeX proofreading and correction.
-
-## Versions:
-- Version 3.5 (Todo): call the project's function plugins using natural language (high priority)
-- Version 3.4 (Todo): improve multi-threading support for locally deployed large chat models
-- Version 3.3: added Internet information aggregation
-- Version 3.2: function plugins support more parameter interfaces (conversation saving, analysis of code in any language, and querying arbitrary combinations of LLMs at the same time)
-- Version 3.1: support querying multiple GPT models at the same time! Support api2d, support load balancing across multiple api-keys
-- Version 3.0: support for chatglm and other small LLMs
-- Version 2.6: restructured the plugin architecture, improved interactivity, added more plugins
-- Version 2.5: self-updating; solves the problem of overly long text and token overflow when summarizing large projects
-- Version 2.4: (1) added full-text PDF translation; (2) added the option to switch the position of the input area; (3) added a vertical layout option; (4) optimized multi-threaded function plugins
-- Version 2.3: improved multi-threaded interactivity
-- Version 2.2: function plugins support hot reloading
-- Version 2.1: collapsible layout
-- Version 2.0: introduced modular function plugins
-- Version 1.0: basic functionality
-
-gpt_academic developer QQ group-2: 610599535
-
-- Known issues
-  - Some browser translation plugins interfere with the front end of this software
-  - A gradio version that is too new or too old can cause many exceptions
-
-## References and learning materials
-
-```
-The code borrows designs from many other excellent projects, including:
-
-# Project 1: Tsinghua ChatGLM-6B:
-https://github.com/THUDM/ChatGLM-6B
-
-# Project 2: Tsinghua JittorLLMs:
-https://github.com/Jittor/JittorLLMs
-
-# Project 3: Edge-GPT:
-https://github.com/acheong08/EdgeGPT
-
-# Project 4: Chuanhu ChatGPT:
-https://github.com/GaiZhenbiao/ChuanhuChatGPT
-
-# Project 5: ChatPaper:
-https://github.com/kaixindelele/ChatPaper
-
-# More:
-https://github.com/gradio-app/gradio
-https://github.com/fghrsh/live2d_demo
-```
\ No newline at end of file
diff --git a/spaces/haoqi7/research/widgets/sidebar.py b/spaces/haoqi7/research/widgets/sidebar.py
deleted file mode 100644
index 75474bc6ae24c1ddee753c27f62ef0318c0f99cc..0000000000000000000000000000000000000000
--- a/spaces/haoqi7/research/widgets/sidebar.py
+++ /dev/null
@@ -1,96 +0,0 @@
-import streamlit as st
-import datetime
-# from .utils import PACKAGE_ROOT
-from lrt.utils.functions import template
-
-APP_VERSION = 'v1.4.1'
-
-def render_sidebar():
- icons = f'''
-
-
-
- '''
-
- sidebar_markdown = f'''
-
-
-
-
-
- Literature Research Tool
-
-
-
-
- {APP_VERSION}
-
-
-
-
-
-
- {icons}
-
- ---
-
- ## Choose the Paper Search Platforms'''
- st.sidebar.markdown(sidebar_markdown,unsafe_allow_html=True)
- # elvsier = st.sidebar.checkbox('Elvsier',value=True)
- # IEEE = st.sidebar.checkbox('IEEE',value=False)
- # google = st.sidebar.checkbox('Google Scholar')
- platforms = st.sidebar.multiselect('Platforms',options=
- [
- # 'Elvsier',
- 'IEEE',
- # 'Google Scholar',
- 'Arxiv',
- 'Paper with Code'
- ], default=[
- # 'Elvsier',
- 'IEEE',
- # 'Google Scholar',
- 'Arxiv',
- 'Paper with Code'
- ])
-
-
-
- st.sidebar.markdown('## Choose the max number of papers to search')
- number_papers=st.sidebar.slider('number', 10, 100, 20, 5)
-
- st.sidebar.markdown('## Choose the start year of publication')
- this_year = datetime.date.today().year
- start_year = st.sidebar.slider('year start:', 2000, this_year, 2010, 1)
-
- st.sidebar.markdown('## Choose the end year of publication')
- end_year = st.sidebar.slider('year end:', 2000, this_year, this_year, 1)
-
-
- with st.sidebar:
- st.markdown('## Adjust hyperparameters')
- with st.expander('Clustering Options'):
- standardization = st.selectbox('1) Standardization before clustering', options=['no', 'yes'], index=0 )
- dr = st.selectbox('2) Dimension reduction', options=['none', 'pca'], index=0)
- tmp = min(number_papers,15)
- max_k = st.slider('3) Max number of clusters', 2,tmp , tmp//2)
- cluster_model = st.selectbox('4) Clustering model', options=['Gaussian Mixture Model', 'K-means'], index=0)
-
- with st.expander('Keyphrases Generation Options'):
- model_cpt = st.selectbox(label='Model checkpoint', options=template.keywords_extraction.keys(),index=0)
-
-
- st.markdown('---')
- st.markdown(icons,unsafe_allow_html=True)
-    st.markdown('''
-    ''', unsafe_allow_html=True)
-
-
-
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Driver Thinkcentre Daytona A Rev 33.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Driver Thinkcentre Daytona A Rev 33.md
deleted file mode 100644
index fd96e9acb9083a1932d195d3ac1ab0bc81304da9..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Driver Thinkcentre Daytona A Rev 33.md
+++ /dev/null
@@ -1,135 +0,0 @@
-
-
Driver Thinkcentre Daytona A Rev 33: What You Need to Know
-
-
If you are looking for a driver for your Lenovo Thinkcentre Daytona A Rev 33 motherboard, you might have a hard time finding it online. This is because this motherboard is an OEM product that was customized for specific models of Lenovo desktop computers. Therefore, the driver for this motherboard is not available on the official Lenovo website or other driver download sites.
However, this does not mean that you cannot use your Thinkcentre Daytona A Rev 33 motherboard with Windows. You just need to find the right driver that matches your system requirements and specifications. In this article, we will show you how to do that and where to download the driver thinkcentre daytona a rev 33 for free.
-
-
How to Find the Right Driver Thinkcentre Daytona A Rev 33
-
-
To find the right driver thinkcentre daytona a rev 33 for your PC, you need to follow these steps:
-
-
-
Identify the model and serial number of your Lenovo desktop computer. You can find this information on a sticker on the back or side of your PC or on the BIOS screen.
-
Go to the Lenovo support website and enter your model and serial number in the search box. This will take you to the product page of your PC.
-
On the product page, click on Drivers & Software and select your operating system (Windows 7, Windows 8, Windows 10, etc.).
-
Look for the Audio driver category and download the latest version of the Realtek HD Audio Driver. This is the driver that supports the Thinkcentre Daytona A Rev 33 motherboard.
-
Save the downloaded file on your PC and run it as an administrator. Follow the instructions to install the driver thinkcentre daytona a rev 33 on your PC.
-
Restart your PC and check if your audio device is working properly.
-
-
-
If you cannot find the Realtek HD Audio Driver on the Lenovo support website or if it does not work for your PC, you can try another method to find the driver thinkcentre daytona a rev 33.
-
-
-
How to Download Driver Thinkcentre Daytona A Rev 33 from Third-Party Sites
-
-
If you cannot find or install the driver thinkcentre daytona a rev 33 from the Lenovo support website, you can try downloading it from third-party sites that offer driver downloads for various devices. However, you need to be careful when using these sites as some of them may contain malware or viruses that can harm your PC.
-
-
To download driver thinkcentre daytona a rev 33 from third-party sites, you need to follow these steps:
-
-
-
Go to a reputable and trusted driver download site, such as DriverGuide, DriversCollection, or DriverIdentifier.
-
Search for driver thinkcentre daytona a rev 33 or Lenovo Thinkcentre Daytona A Rev 33 motherboard in the search box.
-
Choose the driver that matches your system requirements and specifications. Make sure it is compatible with your operating system and has positive reviews and ratings from other users.
-
Download the driver file on your PC and scan it with an antivirus software before opening it.
-
Run the driver file as an administrator and follow the instructions to install it on your PC.
-
Restart your PC and check if your audio device is working properly.
-
-
-
If you still have problems or errors with your audio device after installing driver thinkcentre daytona a rev 33, you can contact Lenovo customer support or visit their forum for more help and guidance.
-
-
Conclusion
-
-
Driver thinkcentre daytona a rev 33 is a driver that supports the Lenovo Thinkcentre Daytona A Rev 33 motherboard, which is an OEM product that was customized for specific models of Lenovo desktop computers. You can find and download this driver from the Lenovo support website or from third-party sites that offer driver downloads for various devices. However, you need to be careful when using these sites as some of them may contain malware or viruses that can harm your PC.
-
-
In this article, we have shown you how to find and download driver thinkcentre daytona a rev 33 for free and how to install it on your PC. We hope you have found this article helpful and informative.
-
-
If you have any questions or feedback about driver thinkcentre daytona a rev 33, feel free to leave a comment below or contact us through our website.
-
How to Update Driver Thinkcentre Daytona A Rev 33
-
-
If you want to update driver thinkcentre daytona a rev 33 to the latest version or fix any issues or bugs, you can do these things:
-
-
-
Go to the Device Manager and find your audio device under the Sound, video and game controllers category.
-
Right-click on your audio device and select Update driver.
-
Choose Search automatically for updated driver software and wait for Windows to find and install the latest driver thinkcentre daytona a rev 33 for your PC.
-
Restart your PC and check if your audio device is working properly.
-
-
-
If Windows cannot find or install the latest driver thinkcentre daytona a rev 33 for your PC, you can try another method to update it.
-
-
How to Download Driver Thinkcentre Daytona A Rev 33 from Lenovo Support Website
-
-
If you want to download driver thinkcentre daytona a rev 33 from the Lenovo support website, you can follow these steps:
-
-
-
Go to the Lenovo support website and enter your model and serial number in the search box. This will take you to the product page of your PC.
-
On the product page, click on Drivers & Software and select your operating system (Windows 7, Windows 8, Windows 10, etc.).
-
Look for the Audio driver category and download the latest version of the Realtek HD Audio Driver. This is the driver that supports the Thinkcentre Daytona A Rev 33 motherboard.
-
Save the downloaded file on your PC and run it as an administrator. Follow the instructions to install driver thinkcentre daytona a rev 33 on your PC.
-
Restart your PC and check if your audio device is working properly.
-
-
-
This is the best way to download driver thinkcentre daytona a rev 33 from the Lenovo support website as it ensures that you get the official and compatible driver for your PC.
-
How to Backup Driver Thinkcentre Daytona A Rev 33
-
-
If you want to backup driver thinkcentre daytona a rev 33 for future use or restore it in case of any problems or errors, you can do these things:
-
-
-
Go to the Device Manager and find your audio device under the Sound, video and game controllers category.
-
Right-click on your audio device and select Properties.
-
Go to the Driver tab and click on Driver Details.
-
Note down the name and location of the driver file (usually something like C:\\Windows\\System32\\drivers\\RTKVHD64.sys).
-
Copy the driver file to a safe location, such as a USB flash drive or an external hard drive.
-
If you need to restore driver thinkcentre daytona a rev 33, you can copy the driver file back to its original location and restart your PC.
-
-
-
This is a simple way to backup driver thinkcentre daytona a rev 33 for future use or restore it in case of any problems or errors.
-
-
How to Uninstall Driver Thinkcentre Daytona A Rev 33
-
-
If you want to uninstall driver thinkcentre daytona a rev 33 from your PC, you can follow these steps:
-
-
-
Go to the Control Panel and click on Programs and Features.
-
Find Realtek HD Audio Driver in the list of installed programs and click on Uninstall.
-
Follow the instructions to complete the uninstallation process.
-
Delete the driver file from its original location if you still have it.
-
-
-
This is how you can uninstall driver thinkcentre daytona a rev 33 from your PC. However, you should be careful when doing this as it may affect your audio device or other devices that use the same driver.
-
How to Fix Driver Thinkcentre Daytona A Rev 33 Errors
-
-
Sometimes, you may encounter some errors or issues with driver thinkcentre daytona a rev 33 that may affect your audio device or other devices that use the same driver. Here are some common errors and how to fix them:
-
-
-
Error code 10: This means that the driver thinkcentre daytona a rev 33 is not installed properly or is corrupted. To fix this, you can try to update or reinstall the driver thinkcentre daytona a rev 33 from the Lenovo support website or from third-party sites.
-
Error code 28: This means that the driver thinkcentre daytona a rev 33 is not installed on your PC. To fix this, you can try to install the driver thinkcentre daytona a rev 33 from the Lenovo support website or from third-party sites.
-
Error code 39: This means that the driver thinkcentre daytona a rev 33 is missing or corrupted. To fix this, you can try to delete the driver file from its original location and reinstall the driver thinkcentre daytona a rev 33 from the Lenovo support website or from third-party sites.
-
Error code 52: This means that the driver thinkcentre daytona a rev 33 is not digitally signed or verified by Windows. To fix this, you can try to disable the driver signature enforcement on your PC and reinstall the driver thinkcentre daytona a rev 33 from the Lenovo support website or from third-party sites.
-
-
-
These are some of the common errors and how to fix them with driver thinkcentre daytona a rev 33. However, if you still have errors or issues with your audio device or other devices that use the same driver, you can contact Lenovo customer support or visit their forum for more help and guidance.
-
-
How to Optimize Driver Thinkcentre Daytona A Rev 33 for Better Performance
-
-
If you want to optimize driver thinkcentre daytona a rev 33 for better performance and sound quality, you can do these things:
-
-
-
Adjust the audio settings on your PC according to your preferences and needs. You can use the Realtek HD Audio Manager or the Windows Sound Settings to change the volume, balance, equalizer, and other options.
-
Update driver thinkcentre daytona a rev 33 regularly to get the latest features and improvements. You can use the Device Manager or the Lenovo support website to check for updates and install them on your PC.
-
Clean up your PC and remove any unnecessary files or programs that may slow down your PC or interfere with driver thinkcentre daytona a rev 33. You can use a disk cleanup tool or an uninstaller software to do this.
-
Scan your PC for any malware or viruses that may harm your PC or affect driver thinkcentre daytona a rev 33. You can use an antivirus software or an anti-malware software to do this.
-
-
-
These are some of the ways to optimize driver thinkcentre daytona a rev 33 for better performance and sound quality. However, you should also be careful not to overdo it or change any settings that may damage your PC or audio device.
-
Conclusion
-
-
Driver thinkcentre daytona a rev 33 is a driver that supports the Lenovo Thinkcentre Daytona A Rev 33 motherboard, which is an OEM product that was customized for specific models of Lenovo desktop computers. You can find and download this driver from the Lenovo support website or from third-party sites that offer driver downloads for various devices. However, you need to be careful when using these sites as some of them may contain malware or viruses that can harm your PC.
-
-
In this article, we have shown you how to find, download, install, update, backup, uninstall, troubleshoot, and optimize driver thinkcentre daytona a rev 33 for your PC. We have also explained some of the features and benefits of using driver thinkcentre daytona a rev 33 for your audio device and other devices that use the same driver. We hope you have found this article helpful and informative.
-
-
If you have any questions or feedback about driver thinkcentre daytona a rev 33, feel free to leave a comment below or contact us through our website.
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (The Chronicles Of Narnia 3 In Hindi ).md b/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (The Chronicles Of Narnia 3 In Hindi ).md
deleted file mode 100644
index 076d56a092e9e31066741fe0b62c153aaa9bc0e3..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/HD Online Player (The Chronicles Of Narnia 3 In Hindi ).md
+++ /dev/null
@@ -1,6 +0,0 @@
-
HD Online Player (The Chronicles Of Narnia 3 In Hindi )
-
-Spiral: From the Book of Saw - HEVC/MKV 4K Ultra HD Teaser Trailer (HEVC) ... Captain Marvel 2019 HDTS Dual Audio [Hindi + English] 720p x264 AAC ... a mp4 as pretty much every bluray player out there supports playback of mp4 (a lot ... (2011) The Chronicles Of Narnia :The Voyage Of The Dawn Treader (2010) The ... 4d29de3e1b
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Autodesk 3ds Max 2011 !!EXCLUSIVE!! Xforce Keygen 12.md b/spaces/inreVtussa/clothingai/Examples/Autodesk 3ds Max 2011 !!EXCLUSIVE!! Xforce Keygen 12.md
deleted file mode 100644
index c099d383bb1bfe4dcfbc35fdf9a49b27b28109ad..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Autodesk 3ds Max 2011 !!EXCLUSIVE!! Xforce Keygen 12.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-'''
-print(results_html)
-st.markdown(results_html, unsafe_allow_html=True)
diff --git a/spaces/jbetker/tortoise/models/vocoder.py b/spaces/jbetker/tortoise/models/vocoder.py
deleted file mode 100644
index d38fb56699c035b3d4a86ace67c567d3f1d51fa9..0000000000000000000000000000000000000000
--- a/spaces/jbetker/tortoise/models/vocoder.py
+++ /dev/null
@@ -1,325 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-MAX_WAV_VALUE = 32768.0
-
-class KernelPredictor(torch.nn.Module):
- ''' Kernel predictor for the location-variable convolutions'''
-
- def __init__(
- self,
- cond_channels,
- conv_in_channels,
- conv_out_channels,
- conv_layers,
- conv_kernel_size=3,
- kpnet_hidden_channels=64,
- kpnet_conv_size=3,
- kpnet_dropout=0.0,
- kpnet_nonlinear_activation="LeakyReLU",
- kpnet_nonlinear_activation_params={"negative_slope": 0.1},
- ):
- '''
- Args:
- cond_channels (int): number of channel for the conditioning sequence,
- conv_in_channels (int): number of channel for the input sequence,
- conv_out_channels (int): number of channel for the output sequence,
- conv_layers (int): number of layers
- '''
- super().__init__()
-
- self.conv_in_channels = conv_in_channels
- self.conv_out_channels = conv_out_channels
- self.conv_kernel_size = conv_kernel_size
- self.conv_layers = conv_layers
-
- kpnet_kernel_channels = conv_in_channels * conv_out_channels * conv_kernel_size * conv_layers # l_w
- kpnet_bias_channels = conv_out_channels * conv_layers # l_b
-
- self.input_conv = nn.Sequential(
- nn.utils.weight_norm(nn.Conv1d(cond_channels, kpnet_hidden_channels, 5, padding=2, bias=True)),
- getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
- )
-
- self.residual_convs = nn.ModuleList()
- padding = (kpnet_conv_size - 1) // 2
- for _ in range(3):
- self.residual_convs.append(
- nn.Sequential(
- nn.Dropout(kpnet_dropout),
- nn.utils.weight_norm(
- nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding,
- bias=True)),
- getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
- nn.utils.weight_norm(
- nn.Conv1d(kpnet_hidden_channels, kpnet_hidden_channels, kpnet_conv_size, padding=padding,
- bias=True)),
- getattr(nn, kpnet_nonlinear_activation)(**kpnet_nonlinear_activation_params),
- )
- )
- self.kernel_conv = nn.utils.weight_norm(
- nn.Conv1d(kpnet_hidden_channels, kpnet_kernel_channels, kpnet_conv_size, padding=padding, bias=True))
- self.bias_conv = nn.utils.weight_norm(
- nn.Conv1d(kpnet_hidden_channels, kpnet_bias_channels, kpnet_conv_size, padding=padding, bias=True))
-
- def forward(self, c):
- '''
- Args:
- c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
- '''
- batch, _, cond_length = c.shape
- c = self.input_conv(c)
- for residual_conv in self.residual_convs:
- residual_conv.to(c.device)
- c = c + residual_conv(c)
- k = self.kernel_conv(c)
- b = self.bias_conv(c)
- kernels = k.contiguous().view(
- batch,
- self.conv_layers,
- self.conv_in_channels,
- self.conv_out_channels,
- self.conv_kernel_size,
- cond_length,
- )
- bias = b.contiguous().view(
- batch,
- self.conv_layers,
- self.conv_out_channels,
- cond_length,
- )
-
- return kernels, bias
-
- def remove_weight_norm(self):
- nn.utils.remove_weight_norm(self.input_conv[0])
- nn.utils.remove_weight_norm(self.kernel_conv)
- nn.utils.remove_weight_norm(self.bias_conv)
- for block in self.residual_convs:
- nn.utils.remove_weight_norm(block[1])
- nn.utils.remove_weight_norm(block[3])
-
-
-class LVCBlock(torch.nn.Module):
- '''the location-variable convolutions'''
-
- def __init__(
- self,
- in_channels,
- cond_channels,
- stride,
- dilations=[1, 3, 9, 27],
- lReLU_slope=0.2,
- conv_kernel_size=3,
- cond_hop_length=256,
- kpnet_hidden_channels=64,
- kpnet_conv_size=3,
- kpnet_dropout=0.0,
- ):
- super().__init__()
-
- self.cond_hop_length = cond_hop_length
- self.conv_layers = len(dilations)
- self.conv_kernel_size = conv_kernel_size
-
- self.kernel_predictor = KernelPredictor(
- cond_channels=cond_channels,
- conv_in_channels=in_channels,
- conv_out_channels=2 * in_channels,
- conv_layers=len(dilations),
- conv_kernel_size=conv_kernel_size,
- kpnet_hidden_channels=kpnet_hidden_channels,
- kpnet_conv_size=kpnet_conv_size,
- kpnet_dropout=kpnet_dropout,
- kpnet_nonlinear_activation_params={"negative_slope": lReLU_slope}
- )
-
- self.convt_pre = nn.Sequential(
- nn.LeakyReLU(lReLU_slope),
- nn.utils.weight_norm(nn.ConvTranspose1d(in_channels, in_channels, 2 * stride, stride=stride,
- padding=stride // 2 + stride % 2, output_padding=stride % 2)),
- )
-
- self.conv_blocks = nn.ModuleList()
- for dilation in dilations:
- self.conv_blocks.append(
- nn.Sequential(
- nn.LeakyReLU(lReLU_slope),
- nn.utils.weight_norm(nn.Conv1d(in_channels, in_channels, conv_kernel_size,
- padding=dilation * (conv_kernel_size - 1) // 2, dilation=dilation)),
- nn.LeakyReLU(lReLU_slope),
- )
- )
-
- def forward(self, x, c):
- ''' forward propagation of the location-variable convolutions.
- Args:
- x (Tensor): the input sequence (batch, in_channels, in_length)
- c (Tensor): the conditioning sequence (batch, cond_channels, cond_length)
-
- Returns:
- Tensor: the output sequence (batch, in_channels, in_length)
- '''
- _, in_channels, _ = x.shape # (B, c_g, L')
-
- x = self.convt_pre(x) # (B, c_g, stride * L')
- kernels, bias = self.kernel_predictor(c)
-
- for i, conv in enumerate(self.conv_blocks):
- output = conv(x) # (B, c_g, stride * L')
-
- k = kernels[:, i, :, :, :, :] # (B, 2 * c_g, c_g, kernel_size, cond_length)
- b = bias[:, i, :, :] # (B, 2 * c_g, cond_length)
-
- output = self.location_variable_convolution(output, k, b,
- hop_size=self.cond_hop_length) # (B, 2 * c_g, stride * L'): LVC
- x = x + torch.sigmoid(output[:, :in_channels, :]) * torch.tanh(
- output[:, in_channels:, :]) # (B, c_g, stride * L'): GAU
-
- return x
-
- def location_variable_convolution(self, x, kernel, bias, dilation=1, hop_size=256):
-        ''' perform location-variable convolution operation on the input sequence (x) using the local convolution kernel.
-        Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), tested on an NVIDIA V100.
- Args:
- x (Tensor): the input sequence (batch, in_channels, in_length).
- kernel (Tensor): the local convolution kernel (batch, in_channel, out_channels, kernel_size, kernel_length)
- bias (Tensor): the bias for the local convolution (batch, out_channels, kernel_length)
- dilation (int): the dilation of convolution.
- hop_size (int): the hop_size of the conditioning sequence.
- Returns:
- (Tensor): the output sequence after performing local convolution. (batch, out_channels, in_length).
- '''
- batch, _, in_length = x.shape
- batch, _, out_channels, kernel_size, kernel_length = kernel.shape
- assert in_length == (kernel_length * hop_size), "length of (x, kernel) is not matched"
-
- padding = dilation * int((kernel_size - 1) / 2)
- x = F.pad(x, (padding, padding), 'constant', 0) # (batch, in_channels, in_length + 2*padding)
- x = x.unfold(2, hop_size + 2 * padding, hop_size) # (batch, in_channels, kernel_length, hop_size + 2*padding)
-
- if hop_size < dilation:
- x = F.pad(x, (0, dilation), 'constant', 0)
- x = x.unfold(3, dilation,
- dilation) # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation)
- x = x[:, :, :, :, :hop_size]
- x = x.transpose(3, 4) # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation)
- x = x.unfold(4, kernel_size, 1) # (batch, in_channels, kernel_length, dilation, _, kernel_size)
-
- o = torch.einsum('bildsk,biokl->bolsd', x, kernel)
- o = o.to(memory_format=torch.channels_last_3d)
- bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d)
- o = o + bias
- o = o.contiguous().view(batch, out_channels, -1)
-
- return o
-
- def remove_weight_norm(self):
- self.kernel_predictor.remove_weight_norm()
- nn.utils.remove_weight_norm(self.convt_pre[1])
- for block in self.conv_blocks:
- nn.utils.remove_weight_norm(block[1])
-
-
-class UnivNetGenerator(nn.Module):
- """UnivNet Generator"""
-
- def __init__(self, noise_dim=64, channel_size=32, dilations=[1,3,9,27], strides=[8,8,4], lReLU_slope=.2, kpnet_conv_size=3,
- # Below are MEL configurations options that this generator requires.
- hop_length=256, n_mel_channels=100):
- super(UnivNetGenerator, self).__init__()
- self.mel_channel = n_mel_channels
- self.noise_dim = noise_dim
- self.hop_length = hop_length
- channel_size = channel_size
- kpnet_conv_size = kpnet_conv_size
-
- self.res_stack = nn.ModuleList()
- hop_length = 1
- for stride in strides:
- hop_length = stride * hop_length
- self.res_stack.append(
- LVCBlock(
- channel_size,
- n_mel_channels,
- stride=stride,
- dilations=dilations,
- lReLU_slope=lReLU_slope,
- cond_hop_length=hop_length,
- kpnet_conv_size=kpnet_conv_size
- )
- )
-
- self.conv_pre = \
- nn.utils.weight_norm(nn.Conv1d(noise_dim, channel_size, 7, padding=3, padding_mode='reflect'))
-
- self.conv_post = nn.Sequential(
- nn.LeakyReLU(lReLU_slope),
- nn.utils.weight_norm(nn.Conv1d(channel_size, 1, 7, padding=3, padding_mode='reflect')),
- nn.Tanh(),
- )
-
- def forward(self, c, z):
- '''
- Args:
- c (Tensor): the conditioning sequence of mel-spectrogram (batch, mel_channels, in_length)
- z (Tensor): the noise sequence (batch, noise_dim, in_length)
-
- '''
- z = self.conv_pre(z) # (B, c_g, L)
-
- for res_block in self.res_stack:
- res_block.to(z.device)
- z = res_block(z, c) # (B, c_g, L * s_0 * ... * s_i)
-
- z = self.conv_post(z) # (B, 1, L * 256)
-
- return z
-
- def eval(self, inference=False):
- super(UnivNetGenerator, self).eval()
-        # don't remove weight norm during validation in the training loop
- if inference:
- self.remove_weight_norm()
-
- def remove_weight_norm(self):
- print('Removing weight norm...')
-
- nn.utils.remove_weight_norm(self.conv_pre)
-
- for layer in self.conv_post:
- if len(layer.state_dict()) != 0:
- nn.utils.remove_weight_norm(layer)
-
- for res_block in self.res_stack:
- res_block.remove_weight_norm()
-
- def inference(self, c, z=None):
- # pad input mel with zeros to cut artifact
- # see https://github.com/seungwonpark/melgan/issues/8
- zero = torch.full((c.shape[0], self.mel_channel, 10), -11.5129).to(c.device)
- mel = torch.cat((c, zero), dim=2)
-
- if z is None:
- z = torch.randn(c.shape[0], self.noise_dim, mel.size(2)).to(mel.device)
-
- audio = self.forward(mel, z)
- audio = audio[:, :, :-(self.hop_length * 10)]
- audio = audio.clamp(min=-1, max=1)
- return audio
-
-
-if __name__ == '__main__':
- model = UnivNetGenerator()
-
- c = torch.randn(3, 100, 10)
- z = torch.randn(3, 64, 10)
- print(c.shape)
-
- y = model(c, z)
- print(y.shape)
- assert y.shape == torch.Size([3, 1, 2560])
-
- pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
- print(pytorch_total_params)
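-
-    # Inference sketch (assumed mel length of 50 frames): inference() pads the mel
-    # with 10 extra frames, vocodes it with random noise, and trims the padded tail.
-    model.eval(inference=True)                    # also strips weight norm
-    mel = torch.randn(1, model.mel_channel, 50)   # (batch, n_mel_channels, frames)
-    wav = model.inference(mel)                    # (batch, 1, frames * hop_length)
-    print(wav.shape)                              # torch.Size([1, 1, 12800])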
diff --git a/spaces/jerpint/buster/generate_embeddings.py b/spaces/jerpint/buster/generate_embeddings.py
deleted file mode 100644
index 476aaaa981866da9760ea088df37cc345c207ef4..0000000000000000000000000000000000000000
--- a/spaces/jerpint/buster/generate_embeddings.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import zipfile
-import requests
-import pandas as pd
-import time
-
-from buster.documents_manager import DeepLakeDocumentsManager
-
-from buster.docparser import get_all_documents
-from buster.parser import HuggingfaceParser
-
-hf_transformers_zip_url = "https://huggingface.co/datasets/hf-doc-build/doc-build/resolve/main/transformers/main.zip"
-
-
-def download_and_unzip(zip_url, target_dir, overwrite=False):
- """Download a zip file from zip_url and unzip it to target_dir.
-
- # Example usage
- zip_url = "https://example.com/example.zip"
- target_dir = "downloaded_files"
- download_and_unzip(zip_url, target_dir, overwrite=True)
-
- ChatGPT generated.
- """
- # Create the target directory if it doesn't exist
- if not os.path.exists(target_dir):
- os.makedirs(target_dir)
-
- # Get the filename from the zip_url
- zip_filename = os.path.basename(zip_url)
- target_path = os.path.join(target_dir, zip_filename)
-
- # Check if the file already exists
- if os.path.exists(target_path) and not overwrite:
- print(f"{zip_filename} already exists in the target directory.")
- return
-
- # Download the zip file
- response = requests.get(zip_url, stream=True)
- if response.status_code == 200:
- with open(target_path, "wb") as file:
- for chunk in response.iter_content(chunk_size=8192):
- file.write(chunk)
- print(f"{zip_filename} downloaded successfully.")
-
- # Unzip the file
- with zipfile.ZipFile(target_path, "r") as zip_ref:
- zip_ref.extractall(target_dir)
- print(f"{zip_filename} extracted successfully.")
- else:
- print(f"Failed to download {zip_filename}. Status code: {response.status_code}")
-
-
-# Download the transformers html pages and unzip them
-download_and_unzip(zip_url=hf_transformers_zip_url, target_dir=".")
-
-# Extract all documents from the html into a dataframe
-df = get_all_documents(
- root_dir="transformers/main/en/",
- base_url="https://huggingface.co/docs/transformers/main/en/",
- parser_cls=HuggingfaceParser,
- min_section_length=100,
- max_section_length=1000,
-)
-
-# Add the source column
-df["source"] = "hf_transformers"
-
-# Save the .csv with chunks to disk
-df.to_csv("hf_transformers.csv")
-
-# Initialize the vector store
-dm = DeepLakeDocumentsManager(
- vector_store_path="deeplake_store",
- overwrite=True,
- required_columns=["url", "content", "source", "title"],
-)
-
-# Add all embeddings to the vector store
-dm.batch_add(
- df=df,
- batch_size=3000,
- min_time_interval=60,
- num_workers=32,
- csv_filename="embeddings.csv",
- csv_overwrite=False,
-)
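-
-# After a successful run the script leaves the following artifacts on disk (per the
-# settings above):
-#   transformers/main/en/  - the unzipped HTML documentation
-#   hf_transformers.csv    - the chunked documents extracted from the HTML
-#   deeplake_store/        - the DeepLake vector store holding the embeddings
-#   embeddings.csv         - per the csv_filename argument passed to batch_add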
diff --git a/spaces/jessica6105/Lu-Bert-VITS2/transforms.py b/spaces/jessica6105/Lu-Bert-VITS2/transforms.py
deleted file mode 100644
index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000
--- a/spaces/jessica6105/Lu-Bert-VITS2/transforms.py
+++ /dev/null
@@ -1,209 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {"tails": tails, "tail_bound": tail_bound}
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1
-
-
-def unconstrained_rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails="linear",
- tail_bound=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == "linear":
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError("{} tails are not implemented.".format(tails))
-
- (
- outputs[inside_interval_mask],
- logabsdet[inside_interval_mask],
- ) = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound,
- right=tail_bound,
- bottom=-tail_bound,
- top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- )
-
- return outputs, logabsdet
-
-
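-# For reference, a sketch of the closed form implemented below: inside bin k, with
-# theta = (x - x_k) / w_k and bin slope s_k = h_k / w_k, the forward map is
-#
-#   y = y_k + h_k * (s_k * theta**2 + d_k * theta * (1 - theta))
-#             / (s_k + (d_{k+1} + d_k - 2 * s_k) * theta * (1 - theta))
-#
-# logabsdet is the log of this map's derivative, and the inverse branch solves the
-# corresponding quadratic equation in theta.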
-def rational_quadratic_spline(
- inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0.0,
- right=1.0,
- bottom=0.0,
- top=1.0,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE,
-):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError("Input to a transform is not within its domain")
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError("Minimal bin width too large for the number of bins")
- if min_bin_height * num_bins > 1.0:
- raise ValueError("Minimal bin height too large for the number of bins")
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- ) + input_heights * (input_delta - input_derivatives)
- b = input_heights * input_derivatives - (inputs - input_cumheights) * (
- input_derivatives + input_derivatives_plus_one - 2 * input_delta
- )
- c = -input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (
- input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
- )
- denominator = input_delta + (
- (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta
- )
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (
- input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2)
- )
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
diff --git a/spaces/jhwen/bingo/postcss.config.js b/spaces/jhwen/bingo/postcss.config.js
deleted file mode 100644
index 33ad091d26d8a9dc95ebdf616e217d985ec215b8..0000000000000000000000000000000000000000
--- a/spaces/jhwen/bingo/postcss.config.js
+++ /dev/null
@@ -1,6 +0,0 @@
-module.exports = {
- plugins: {
- tailwindcss: {},
- autoprefixer: {},
- },
-}
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_ctr.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_ctr.py
deleted file mode 100644
index 81f2be9c3592f0fd366133e54dc2888f8ceef22d..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/Crypto/Cipher/_mode_ctr.py
+++ /dev/null
@@ -1,393 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Cipher/mode_ctr.py : CTR mode
-#
-# ===================================================================
-# The contents of this file are dedicated to the public domain. To
-# the extent that dedication to the public domain is not available,
-# everyone is granted a worldwide, perpetual, royalty-free,
-# non-exclusive license to exercise all rights associated with the
-# contents of this file for any purpose whatsoever.
-# No rights are reserved.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-# ===================================================================
-
-"""
-Counter (CTR) mode.
-"""
-
-__all__ = ['CtrMode']
-
-import struct
-
-from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, VoidPointer,
- create_string_buffer, get_raw_buffer,
- SmartPointer, c_size_t, c_uint8_ptr,
- is_writeable_buffer)
-
-from Crypto.Random import get_random_bytes
-from Crypto.Util.py3compat import _copy_bytes, is_native_int
-from Crypto.Util.number import long_to_bytes
-
-raw_ctr_lib = load_pycryptodome_raw_lib("Crypto.Cipher._raw_ctr", """
- int CTR_start_operation(void *cipher,
- uint8_t initialCounterBlock[],
- size_t initialCounterBlock_len,
- size_t prefix_len,
- unsigned counter_len,
- unsigned littleEndian,
- void **pResult);
- int CTR_encrypt(void *ctrState,
- const uint8_t *in,
- uint8_t *out,
- size_t data_len);
- int CTR_decrypt(void *ctrState,
- const uint8_t *in,
- uint8_t *out,
- size_t data_len);
- int CTR_stop_operation(void *ctrState);"""
- )
-
-
-class CtrMode(object):
- """*CounTeR (CTR)* mode.
-
- This mode is very similar to ECB, in that
- encryption of one block is done independently of all other blocks.
-
- Unlike ECB, the block *position* contributes to the encryption
- and no information leaks about symbol frequency.
-
- Each message block is associated to a *counter* which
- must be unique across all messages that get encrypted
- with the same key (not just within the same message).
- The counter is as big as the block size.
-
- Counters can be generated in several ways. The most
-    straightforward one is to choose an *initial counter block*
- (which can be made public, similarly to the *IV* for the
- other modes) and increment its lowest **m** bits by one
- (modulo *2^m*) for each block. In most cases, **m** is
- chosen to be half the block size.
-
- See `NIST SP800-38A`_, Section 6.5 (for the mode) and
- Appendix B (for how to manage the *initial counter block*).
-
- .. _`NIST SP800-38A` : http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
-
- :undocumented: __init__
- """
-
- def __init__(self, block_cipher, initial_counter_block,
- prefix_len, counter_len, little_endian):
- """Create a new block cipher, configured in CTR mode.
-
- :Parameters:
- block_cipher : C pointer
- A smart pointer to the low-level block cipher instance.
-
- initial_counter_block : bytes/bytearray/memoryview
- The initial plaintext to use to generate the key stream.
-
- It is as large as the cipher block, and it embeds
- the initial value of the counter.
-
- This value must not be reused.
- It shall contain a nonce or a random component.
- Reusing the *initial counter block* for encryptions
- performed with the same key compromises confidentiality.
-
- prefix_len : integer
- The amount of bytes at the beginning of the counter block
- that never change.
-
- counter_len : integer
- The length in bytes of the counter embedded in the counter
- block.
-
- little_endian : boolean
- True if the counter in the counter block is an integer encoded
- in little endian mode. If False, it is big endian.
- """
-
- if len(initial_counter_block) == prefix_len + counter_len:
- self.nonce = _copy_bytes(None, prefix_len, initial_counter_block)
- """Nonce; not available if there is a fixed suffix"""
-
- self._state = VoidPointer()
- result = raw_ctr_lib.CTR_start_operation(block_cipher.get(),
- c_uint8_ptr(initial_counter_block),
- c_size_t(len(initial_counter_block)),
- c_size_t(prefix_len),
- counter_len,
- little_endian,
- self._state.address_of())
- if result:
- raise ValueError("Error %X while instantiating the CTR mode"
- % result)
-
- # Ensure that object disposal of this Python object will (eventually)
- # free the memory allocated by the raw library for the cipher mode
- self._state = SmartPointer(self._state.get(),
- raw_ctr_lib.CTR_stop_operation)
-
-        # Memory allocated for the underlying block cipher is now owned
-        # by the cipher mode
- block_cipher.release()
-
- self.block_size = len(initial_counter_block)
- """The block size of the underlying cipher, in bytes."""
-
- self._next = ["encrypt", "decrypt"]
-
- def encrypt(self, plaintext, output=None):
- """Encrypt data with the key and the parameters set at initialization.
-
- A cipher object is stateful: once you have encrypted a message
- you cannot encrypt (or decrypt) another message using the same
- object.
-
-        The data to encrypt can be broken up into two or
-        more pieces and `encrypt` can be called multiple times.
-
- That is, the statement:
-
- >>> c.encrypt(a) + c.encrypt(b)
-
- is equivalent to:
-
- >>> c.encrypt(a+b)
-
- This function does not add any padding to the plaintext.
-
- :Parameters:
- plaintext : bytes/bytearray/memoryview
- The piece of data to encrypt.
- It can be of any length.
- :Keywords:
- output : bytearray/memoryview
- The location where the ciphertext must be written to.
- If ``None``, the ciphertext is returned.
- :Return:
- If ``output`` is ``None``, the ciphertext is returned as ``bytes``.
- Otherwise, ``None``.
- """
-
- if "encrypt" not in self._next:
- raise TypeError("encrypt() cannot be called after decrypt()")
- self._next = ["encrypt"]
-
- if output is None:
- ciphertext = create_string_buffer(len(plaintext))
- else:
- ciphertext = output
-
- if not is_writeable_buffer(output):
- raise TypeError("output must be a bytearray or a writeable memoryview")
-
- if len(plaintext) != len(output):
- raise ValueError("output must have the same length as the input"
- " (%d bytes)" % len(plaintext))
-
- result = raw_ctr_lib.CTR_encrypt(self._state.get(),
- c_uint8_ptr(plaintext),
- c_uint8_ptr(ciphertext),
- c_size_t(len(plaintext)))
- if result:
- if result == 0x60002:
- raise OverflowError("The counter has wrapped around in"
- " CTR mode")
- raise ValueError("Error %X while encrypting in CTR mode" % result)
-
- if output is None:
- return get_raw_buffer(ciphertext)
- else:
- return None
-
- def decrypt(self, ciphertext, output=None):
- """Decrypt data with the key and the parameters set at initialization.
-
- A cipher object is stateful: once you have decrypted a message
- you cannot decrypt (or encrypt) another message with the same
- object.
-
-        The data to decrypt can be broken up into two or
-        more pieces and `decrypt` can be called multiple times.
-
- That is, the statement:
-
- >>> c.decrypt(a) + c.decrypt(b)
-
- is equivalent to:
-
- >>> c.decrypt(a+b)
-
- This function does not remove any padding from the plaintext.
-
- :Parameters:
- ciphertext : bytes/bytearray/memoryview
- The piece of data to decrypt.
- It can be of any length.
- :Keywords:
- output : bytearray/memoryview
- The location where the plaintext must be written to.
- If ``None``, the plaintext is returned.
- :Return:
- If ``output`` is ``None``, the plaintext is returned as ``bytes``.
- Otherwise, ``None``.
- """
-
- if "decrypt" not in self._next:
- raise TypeError("decrypt() cannot be called after encrypt()")
- self._next = ["decrypt"]
-
- if output is None:
- plaintext = create_string_buffer(len(ciphertext))
- else:
- plaintext = output
-
- if not is_writeable_buffer(output):
- raise TypeError("output must be a bytearray or a writeable memoryview")
-
-            if len(ciphertext) != len(output):
-                raise ValueError("output must have the same length as the input"
-                                 " (%d bytes)" % len(ciphertext))
-
- result = raw_ctr_lib.CTR_decrypt(self._state.get(),
- c_uint8_ptr(ciphertext),
- c_uint8_ptr(plaintext),
- c_size_t(len(ciphertext)))
- if result:
- if result == 0x60002:
- raise OverflowError("The counter has wrapped around in"
- " CTR mode")
- raise ValueError("Error %X while decrypting in CTR mode" % result)
-
- if output is None:
- return get_raw_buffer(plaintext)
- else:
- return None
-
-
-def _create_ctr_cipher(factory, **kwargs):
- """Instantiate a cipher object that performs CTR encryption/decryption.
-
- :Parameters:
- factory : module
- The underlying block cipher, a module from ``Crypto.Cipher``.
-
- :Keywords:
- nonce : bytes/bytearray/memoryview
- The fixed part at the beginning of the counter block - the rest is
- the counter number that gets increased when processing the next block.
- The nonce must be such that no two messages are encrypted under the
- same key and the same nonce.
-
- The nonce must be shorter than the block size (it can have
- zero length; the counter is then as long as the block).
-
- If this parameter is not present, a random nonce will be created with
- length equal to half the block size. No random nonce shorter than
- 64 bits will be created though - you must really think through all
- security consequences of using such a short block size.
-
-      initial_value : positive integer or bytes/bytearray/memoryview
- The initial value for the counter. If not present, the cipher will
- start counting from 0. The value is incremented by one for each block.
- The counter number is encoded in big endian mode.
-
- counter : object
- Instance of ``Crypto.Util.Counter``, which allows full customization
- of the counter block. This parameter is incompatible to both ``nonce``
- and ``initial_value``.
-
- Any other keyword will be passed to the underlying block cipher.
- See the relevant documentation for details (at least ``key`` will need
- to be present).
- """
-
- cipher_state = factory._create_base_cipher(kwargs)
-
- counter = kwargs.pop("counter", None)
- nonce = kwargs.pop("nonce", None)
- initial_value = kwargs.pop("initial_value", None)
- if kwargs:
- raise TypeError("Invalid parameters for CTR mode: %s" % str(kwargs))
-
- if counter is not None and (nonce, initial_value) != (None, None):
- raise TypeError("'counter' and 'nonce'/'initial_value'"
- " are mutually exclusive")
-
- if counter is None:
- # Crypto.Util.Counter is not used
- if nonce is None:
- if factory.block_size < 16:
- raise TypeError("Impossible to create a safe nonce for short"
- " block sizes")
- nonce = get_random_bytes(factory.block_size // 2)
- else:
- if len(nonce) >= factory.block_size:
- raise ValueError("Nonce is too long")
-
- # What is not nonce is counter
- counter_len = factory.block_size - len(nonce)
-
- if initial_value is None:
- initial_value = 0
-
- if is_native_int(initial_value):
- if (1 << (counter_len * 8)) - 1 < initial_value:
- raise ValueError("Initial counter value is too large")
- initial_counter_block = nonce + long_to_bytes(initial_value, counter_len)
- else:
- if len(initial_value) != counter_len:
- raise ValueError("Incorrect length for counter byte string (%d bytes, expected %d)" %
- (len(initial_value), counter_len))
- initial_counter_block = nonce + initial_value
-
- return CtrMode(cipher_state,
- initial_counter_block,
- len(nonce), # prefix
- counter_len,
- False) # little_endian
-
- # Crypto.Util.Counter is used
-
- # 'counter' used to be a callable object, but now it is
- # just a dictionary for backward compatibility.
- _counter = dict(counter)
- try:
- counter_len = _counter.pop("counter_len")
- prefix = _counter.pop("prefix")
- suffix = _counter.pop("suffix")
- initial_value = _counter.pop("initial_value")
- little_endian = _counter.pop("little_endian")
- except KeyError:
- raise TypeError("Incorrect counter object"
- " (use Crypto.Util.Counter.new)")
-
- # Compute initial counter block
- words = []
- while initial_value > 0:
- words.append(struct.pack('B', initial_value & 255))
- initial_value >>= 8
- words += [b'\x00'] * max(0, counter_len - len(words))
- if not little_endian:
- words.reverse()
- initial_counter_block = prefix + b"".join(words) + suffix
-
- if len(initial_counter_block) != factory.block_size:
- raise ValueError("Size of the counter block (%d bytes) must match"
- " block size (%d)" % (len(initial_counter_block),
- factory.block_size))
-
- return CtrMode(cipher_state, initial_counter_block,
- len(prefix), counter_len, little_endian)
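
The `nonce` and `initial_value` keywords documented above are normally reached through the high-level cipher factories rather than by constructing `CtrMode` directly. A minimal, hedged usage sketch with `Crypto.Cipher.AES` (the key and message are placeholders):

```python
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)

# No nonce given: a random block_size//2 (8-byte for AES) nonce is generated.
cipher = AES.new(key, AES.MODE_CTR)
ciphertext = cipher.encrypt(b"secret data")

# Decryption must reuse the same key and the same nonce (the counter-block prefix).
decipher = AES.new(key, AES.MODE_CTR, nonce=cipher.nonce)
assert decipher.decrypt(ciphertext) == b"secret data"
```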
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageFilter.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageFilter.py
deleted file mode 100644
index 33bc7cc2e30ea9a0f95cc884de151643915848fa..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/PIL/ImageFilter.py
+++ /dev/null
@@ -1,550 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# standard filters
-#
-# History:
-# 1995-11-27 fl Created
-# 2002-06-08 fl Added rank and mode filters
-# 2003-09-15 fl Fixed rank calculation in rank filter; added expand call
-#
-# Copyright (c) 1997-2003 by Secret Labs AB.
-# Copyright (c) 1995-2002 by Fredrik Lundh.
-#
-# See the README file for information on usage and redistribution.
-#
-import functools
-
-
-class Filter:
- pass
-
-
-class MultibandFilter(Filter):
- pass
-
-
-class BuiltinFilter(MultibandFilter):
- def filter(self, image):
- if image.mode == "P":
- msg = "cannot filter palette images"
- raise ValueError(msg)
- return image.filter(*self.filterargs)
-
-
-class Kernel(BuiltinFilter):
- """
- Create a convolution kernel. The current version only
- supports 3x3 and 5x5 integer and floating point kernels.
-
- In the current version, kernels can only be applied to
- "L" and "RGB" images.
-
- :param size: Kernel size, given as (width, height). In the current
- version, this must be (3,3) or (5,5).
- :param kernel: A sequence containing kernel weights. The kernel will
- be flipped vertically before being applied to the image.
- :param scale: Scale factor. If given, the result for each pixel is
- divided by this value. The default is the sum of the
- kernel weights.
- :param offset: Offset. If given, this value is added to the result,
- after it has been divided by the scale factor.
- """
-
- name = "Kernel"
-
- def __init__(self, size, kernel, scale=None, offset=0):
- if scale is None:
- # default scale is sum of kernel
- scale = functools.reduce(lambda a, b: a + b, kernel)
- if size[0] * size[1] != len(kernel):
- msg = "not enough coefficients in kernel"
- raise ValueError(msg)
- self.filterargs = size, scale, offset, kernel
-
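
As a quick illustration of the `Kernel` filter described above, here is a hedged sketch that applies a hand-written 3x3 sharpening kernel; `photo.jpg` is a placeholder path:

```python
from PIL import Image, ImageFilter

im = Image.open("photo.jpg").convert("RGB")
sharpen = ImageFilter.Kernel(
    size=(3, 3),
    kernel=[0, -1, 0,
            -1, 5, -1,
            0, -1, 0],
    scale=1,  # equals the sum of the weights here; made explicit for clarity
)
im.filter(sharpen).save("photo_sharp.jpg")
```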
-
-class RankFilter(Filter):
- """
- Create a rank filter. The rank filter sorts all pixels in
- a window of the given size, and returns the ``rank``'th value.
-
- :param size: The kernel size, in pixels.
- :param rank: What pixel value to pick. Use 0 for a min filter,
- ``size * size / 2`` for a median filter, ``size * size - 1``
- for a max filter, etc.
- """
-
- name = "Rank"
-
- def __init__(self, size, rank):
- self.size = size
- self.rank = rank
-
- def filter(self, image):
- if image.mode == "P":
- msg = "cannot filter palette images"
- raise ValueError(msg)
- image = image.expand(self.size // 2, self.size // 2)
- return image.rankfilter(self.size, self.rank)
-
-
-class MedianFilter(RankFilter):
- """
- Create a median filter. Picks the median pixel value in a window with the
- given size.
-
- :param size: The kernel size, in pixels.
- """
-
- name = "Median"
-
- def __init__(self, size=3):
- self.size = size
- self.rank = size * size // 2
-
-
-class MinFilter(RankFilter):
- """
- Create a min filter. Picks the lowest pixel value in a window with the
- given size.
-
- :param size: The kernel size, in pixels.
- """
-
- name = "Min"
-
- def __init__(self, size=3):
- self.size = size
- self.rank = 0
-
-
-class MaxFilter(RankFilter):
- """
- Create a max filter. Picks the largest pixel value in a window with the
- given size.
-
- :param size: The kernel size, in pixels.
- """
-
- name = "Max"
-
- def __init__(self, size=3):
- self.size = size
- self.rank = size * size - 1
-
-
-class ModeFilter(Filter):
- """
- Create a mode filter. Picks the most frequent pixel value in a box with the
- given size. Pixel values that occur only once or twice are ignored; if no
- pixel value occurs more than twice, the original pixel value is preserved.
-
- :param size: The kernel size, in pixels.
- """
-
- name = "Mode"
-
- def __init__(self, size=3):
- self.size = size
-
- def filter(self, image):
- return image.modefilter(self.size)
-
-
-class GaussianBlur(MultibandFilter):
- """Blurs the image with a sequence of extended box filters, which
-    approximates a Gaussian kernel.
-
- :param radius: Standard deviation of the Gaussian kernel.
- """
-
- name = "GaussianBlur"
-
- def __init__(self, radius=2):
- self.radius = radius
-
- def filter(self, image):
- return image.gaussian_blur(self.radius)
-
-
-class BoxBlur(MultibandFilter):
- """Blurs the image by setting each pixel to the average value of the pixels
- in a square box extending radius pixels in each direction.
- Supports float radius of arbitrary size. Uses an optimized implementation
- which runs in linear time relative to the size of the image
- for any radius value.
-
- :param radius: Size of the box in one direction. Radius 0 does not blur,
- returns an identical image. Radius 1 takes 1 pixel
- in each direction, i.e. 9 pixels in total.
- """
-
- name = "BoxBlur"
-
- def __init__(self, radius):
- if radius < 0:
- msg = "radius must be >= 0"
- raise ValueError(msg)
- self.radius = radius
-
- def filter(self, image):
- return image.box_blur(self.radius)
-
-
-class UnsharpMask(MultibandFilter):
- """Unsharp mask filter.
-
- See Wikipedia's entry on `digital unsharp masking`_ for an explanation of
- the parameters.
-
- :param radius: Blur Radius
- :param percent: Unsharp strength, in percent
- :param threshold: Threshold controls the minimum brightness change that
- will be sharpened
-
- .. _digital unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking#Digital_unsharp_masking
-
- """ # noqa: E501
-
- name = "UnsharpMask"
-
- def __init__(self, radius=2, percent=150, threshold=3):
- self.radius = radius
- self.percent = percent
- self.threshold = threshold
-
- def filter(self, image):
- return image.unsharp_mask(self.radius, self.percent, self.threshold)
-
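
The blur and sharpening filters above are all applied the same way, by passing an instance to `Image.filter()`. A short, hedged sketch (file name is a placeholder):

```python
from PIL import Image, ImageFilter

im = Image.open("photo.jpg")
blurred = im.filter(ImageFilter.GaussianBlur(radius=3))
boxed = im.filter(ImageFilter.BoxBlur(radius=2))
sharpened = im.filter(ImageFilter.UnsharpMask(radius=2, percent=150, threshold=3))
```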
-
-class BLUR(BuiltinFilter):
- name = "Blur"
- # fmt: off
- filterargs = (5, 5), 16, 0, (
- 1, 1, 1, 1, 1,
- 1, 0, 0, 0, 1,
- 1, 0, 0, 0, 1,
- 1, 0, 0, 0, 1,
- 1, 1, 1, 1, 1,
- )
- # fmt: on
-
-
-class CONTOUR(BuiltinFilter):
- name = "Contour"
- # fmt: off
- filterargs = (3, 3), 1, 255, (
- -1, -1, -1,
- -1, 8, -1,
- -1, -1, -1,
- )
- # fmt: on
-
-
-class DETAIL(BuiltinFilter):
- name = "Detail"
- # fmt: off
- filterargs = (3, 3), 6, 0, (
- 0, -1, 0,
- -1, 10, -1,
- 0, -1, 0,
- )
- # fmt: on
-
-
-class EDGE_ENHANCE(BuiltinFilter):
- name = "Edge-enhance"
- # fmt: off
- filterargs = (3, 3), 2, 0, (
- -1, -1, -1,
- -1, 10, -1,
- -1, -1, -1,
- )
- # fmt: on
-
-
-class EDGE_ENHANCE_MORE(BuiltinFilter):
- name = "Edge-enhance More"
- # fmt: off
- filterargs = (3, 3), 1, 0, (
- -1, -1, -1,
- -1, 9, -1,
- -1, -1, -1,
- )
- # fmt: on
-
-
-class EMBOSS(BuiltinFilter):
- name = "Emboss"
- # fmt: off
- filterargs = (3, 3), 1, 128, (
- -1, 0, 0,
- 0, 1, 0,
- 0, 0, 0,
- )
- # fmt: on
-
-
-class FIND_EDGES(BuiltinFilter):
- name = "Find Edges"
- # fmt: off
- filterargs = (3, 3), 1, 0, (
- -1, -1, -1,
- -1, 8, -1,
- -1, -1, -1,
- )
- # fmt: on
-
-
-class SHARPEN(BuiltinFilter):
- name = "Sharpen"
- # fmt: off
- filterargs = (3, 3), 16, 0, (
- -2, -2, -2,
- -2, 32, -2,
- -2, -2, -2,
- )
- # fmt: on
-
-
-class SMOOTH(BuiltinFilter):
- name = "Smooth"
- # fmt: off
- filterargs = (3, 3), 13, 0, (
- 1, 1, 1,
- 1, 5, 1,
- 1, 1, 1,
- )
- # fmt: on
-
-
-class SMOOTH_MORE(BuiltinFilter):
- name = "Smooth More"
- # fmt: off
- filterargs = (5, 5), 100, 0, (
- 1, 1, 1, 1, 1,
- 1, 5, 5, 5, 1,
- 1, 5, 44, 5, 1,
- 1, 5, 5, 5, 1,
- 1, 1, 1, 1, 1,
- )
- # fmt: on
-
-
-class Color3DLUT(MultibandFilter):
- """Three-dimensional color lookup table.
-
- Transforms 3-channel pixels using the values of the channels as coordinates
- in the 3D lookup table and interpolating the nearest elements.
-
- This method allows you to apply almost any color transformation
- in constant time by using pre-calculated decimated tables.
-
- .. versionadded:: 5.2.0
-
- :param size: Size of the table. One int or tuple of (int, int, int).
- Minimal size in any dimension is 2, maximum is 65.
- :param table: Flat lookup table. A list of ``channels * size**3``
- float elements or a list of ``size**3`` channels-sized
- tuples with floats. Channels are changed first,
- then first dimension, then second, then third.
-                  Value 0.0 corresponds to the lowest output value, 1.0 to the highest.
-    :param channels: Number of channels in the table. Can be 3 or 4.
-                     Default is 3.
-    :param target_mode: A mode for the result image. Should have at least
-                        as many channels as ``channels``. Default is ``None``,
-                        which means the mode is not changed.
- """
-
- name = "Color 3D LUT"
-
- def __init__(self, size, table, channels=3, target_mode=None, **kwargs):
- if channels not in (3, 4):
- msg = "Only 3 or 4 output channels are supported"
- raise ValueError(msg)
- self.size = size = self._check_size(size)
- self.channels = channels
- self.mode = target_mode
-
- # Hidden flag `_copy_table=False` could be used to avoid extra copying
- # of the table if the table is specially made for the constructor.
- copy_table = kwargs.get("_copy_table", True)
- items = size[0] * size[1] * size[2]
- wrong_size = False
-
- numpy = None
- if hasattr(table, "shape"):
- try:
- import numpy
- except ImportError: # pragma: no cover
- pass
-
- if numpy and isinstance(table, numpy.ndarray):
- if copy_table:
- table = table.copy()
-
- if table.shape in [
- (items * channels,),
- (items, channels),
- (size[2], size[1], size[0], channels),
- ]:
- table = table.reshape(items * channels)
- else:
- wrong_size = True
-
- else:
- if copy_table:
- table = list(table)
-
- # Convert to a flat list
- if table and isinstance(table[0], (list, tuple)):
- table, raw_table = [], table
- for pixel in raw_table:
- if len(pixel) != channels:
- msg = (
- "The elements of the table should "
- f"have a length of {channels}."
- )
- raise ValueError(msg)
- table.extend(pixel)
-
- if wrong_size or len(table) != items * channels:
- msg = (
- "The table should have either channels * size**3 float items "
- "or size**3 items of channels-sized tuples with floats. "
- f"Table should be: {channels}x{size[0]}x{size[1]}x{size[2]}. "
- f"Actual length: {len(table)}"
- )
- raise ValueError(msg)
- self.table = table
-
- @staticmethod
- def _check_size(size):
- try:
- _, _, _ = size
- except ValueError as e:
- msg = "Size should be either an integer or a tuple of three integers."
- raise ValueError(msg) from e
- except TypeError:
- size = (size, size, size)
- size = [int(x) for x in size]
- for size_1d in size:
- if not 2 <= size_1d <= 65:
- msg = "Size should be in [2, 65] range."
- raise ValueError(msg)
- return size
-
- @classmethod
- def generate(cls, size, callback, channels=3, target_mode=None):
- """Generates new LUT using provided callback.
-
- :param size: Size of the table. Passed to the constructor.
-        :param callback: Function with three parameters which correspond to
-                         the three color channels. It will be called ``size**3``
-                         times with values from 0.0 to 1.0 and should return
-                         a tuple with ``channels`` elements.
-        :param channels: The number of channels the callback should return.
- :param target_mode: Passed to the constructor of the resulting
- lookup table.
- """
- size_1d, size_2d, size_3d = cls._check_size(size)
- if channels not in (3, 4):
- msg = "Only 3 or 4 output channels are supported"
- raise ValueError(msg)
-
- table = [0] * (size_1d * size_2d * size_3d * channels)
- idx_out = 0
- for b in range(size_3d):
- for g in range(size_2d):
- for r in range(size_1d):
- table[idx_out : idx_out + channels] = callback(
- r / (size_1d - 1), g / (size_2d - 1), b / (size_3d - 1)
- )
- idx_out += channels
-
- return cls(
- (size_1d, size_2d, size_3d),
- table,
- channels=channels,
- target_mode=target_mode,
- _copy_table=False,
- )
-
- def transform(self, callback, with_normals=False, channels=None, target_mode=None):
- """Transforms the table values using provided callback and returns
- a new LUT with altered values.
-
-        :param callback: A function which takes old lookup table values
-                         and returns a new set of values. The number
-                         of arguments the function should take is
-                         ``self.channels``, or ``3 + self.channels``
-                         if the ``with_normals`` flag is set.
-                         It should return a tuple of ``self.channels`` elements,
-                         or of ``channels`` elements if ``channels`` is set.
- :param with_normals: If true, ``callback`` will be called with
- coordinates in the color cube as the first
- three arguments. Otherwise, ``callback``
- will be called only with actual color values.
- :param channels: The number of channels in the resulting lookup table.
- :param target_mode: Passed to the constructor of the resulting
- lookup table.
- """
- if channels not in (None, 3, 4):
- msg = "Only 3 or 4 output channels are supported"
- raise ValueError(msg)
- ch_in = self.channels
- ch_out = channels or ch_in
- size_1d, size_2d, size_3d = self.size
-
- table = [0] * (size_1d * size_2d * size_3d * ch_out)
- idx_in = 0
- idx_out = 0
- for b in range(size_3d):
- for g in range(size_2d):
- for r in range(size_1d):
- values = self.table[idx_in : idx_in + ch_in]
- if with_normals:
- values = callback(
- r / (size_1d - 1),
- g / (size_2d - 1),
- b / (size_3d - 1),
- *values,
- )
- else:
- values = callback(*values)
- table[idx_out : idx_out + ch_out] = values
- idx_in += ch_in
- idx_out += ch_out
-
- return type(self)(
- self.size,
- table,
- channels=ch_out,
- target_mode=target_mode or self.mode,
- _copy_table=False,
- )
-
- def __repr__(self):
- r = [
- f"{self.__class__.__name__} from {self.table.__class__.__name__}",
- "size={:d}x{:d}x{:d}".format(*self.size),
- f"channels={self.channels:d}",
- ]
- if self.mode:
- r.append(f"target_mode={self.mode}")
- return "<{}>".format(" ".join(r))
-
- def filter(self, image):
- from . import Image
-
- return image.color_lut_3d(
- self.mode or image.mode,
- Image.Resampling.BILINEAR,
- self.channels,
- self.size[0],
- self.size[1],
- self.size[2],
- self.table,
- )
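
To make the `generate`/`transform` workflow above concrete, here is a hedged example that builds a colour-inverting LUT and applies it; the file names are placeholders:

```python
from PIL import Image, ImageFilter

# Build a 17x17x17 LUT that inverts each channel, then apply it to an RGB image.
invert_lut = ImageFilter.Color3DLUT.generate(
    size=17,
    callback=lambda r, g, b: (1.0 - r, 1.0 - g, 1.0 - b),
)
Image.open("photo.jpg").convert("RGB").filter(invert_lut).save("photo_inverted.jpg")
```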
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/expr/consts.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/expr/consts.py
deleted file mode 100644
index 974fb06a3c756a7e27106f4d1bb9c17b78a094fd..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/altair/expr/consts.py
+++ /dev/null
@@ -1,29 +0,0 @@
-from typing import Dict
-
-from .core import ConstExpression
-
-
-CONST_LISTING = {
- "NaN": "not a number (same as JavaScript literal NaN)",
- "LN10": "the natural log of 10 (alias to Math.LN10)",
- "E": "the transcendental number e (alias to Math.E)",
-    "LOG10E": "the base 10 logarithm of e (alias to Math.LOG10E)",
-    "LOG2E": "the base 2 logarithm of e (alias to Math.LOG2E)",
-    "SQRT1_2": "the square root of 0.5 (alias to Math.SQRT1_2)",
-    "LN2": "the natural log of 2 (alias to Math.LN2)",
-    "SQRT2": "the square root of 2 (alias to Math.SQRT2)",
- "PI": "the transcendental number pi (alias to Math.PI)",
-}
-
-NAME_MAP: Dict[str, str] = {}
-
-
-def _populate_namespace():
- globals_ = globals()
- for name, doc in CONST_LISTING.items():
- py_name = NAME_MAP.get(name, name)
- globals_[py_name] = ConstExpression(name, doc)
- yield py_name
-
-
-__all__ = list(_populate_namespace())
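
A short, hedged sketch of how the populated constants are meant to be used: they compose with `alt.datum` references into Vega expressions that can be handed to `transform_calculate`. The data URL and the `angle_deg` column are placeholders:

```python
import altair as alt

# PI is one of the ConstExpression objects exported above; arithmetic on it
# builds a Vega expression rather than evaluating anything in Python.
angle_rad = alt.datum.angle_deg * alt.expr.PI / 180
chart = alt.Chart("data.json").transform_calculate(angle_rad=angle_rad)
```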
diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssectypes.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssectypes.py
deleted file mode 100644
index 02131e0adaeb85eb49351f4953c854023315fab9..0000000000000000000000000000000000000000
--- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/dns/dnssectypes.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
-
-# Copyright (C) 2003-2017 Nominum, Inc.
-#
-# Permission to use, copy, modify, and distribute this software and its
-# documentation for any purpose with or without fee is hereby granted,
-# provided that the above copyright notice and this permission notice
-# appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
-# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
-# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
-# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""Common DNSSEC-related types."""
-
-# This is a separate file to avoid import circularity between dns.dnssec and
-# the implementations of the DS and DNSKEY types.
-
-import dns.enum
-
-
-class Algorithm(dns.enum.IntEnum):
- RSAMD5 = 1
- DH = 2
- DSA = 3
- ECC = 4
- RSASHA1 = 5
- DSANSEC3SHA1 = 6
- RSASHA1NSEC3SHA1 = 7
- RSASHA256 = 8
- RSASHA512 = 10
- ECCGOST = 12
- ECDSAP256SHA256 = 13
- ECDSAP384SHA384 = 14
- ED25519 = 15
- ED448 = 16
- INDIRECT = 252
- PRIVATEDNS = 253
- PRIVATEOID = 254
-
- @classmethod
- def _maximum(cls):
- return 255
-
-
-class DSDigest(dns.enum.IntEnum):
- """DNSSEC Delegation Signer Digest Algorithm"""
-
- NULL = 0
- SHA1 = 1
- SHA256 = 2
- GOST = 3
- SHA384 = 4
-
- @classmethod
- def _maximum(cls):
- return 255
-
-
-class NSEC3Hash(dns.enum.IntEnum):
- """NSEC3 hash algorithm"""
-
- SHA1 = 1
-
- @classmethod
- def _maximum(cls):
- return 255
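
Since these classes are plain `IntEnum` subclasses, they compare directly against the numeric code points found in DNS records; a minimal sketch:

```python
from dns.dnssectypes import Algorithm, DSDigest, NSEC3Hash

# Members are ordinary integers, so they compare directly with wire-format codes.
assert Algorithm.RSASHA256 == 8
assert Algorithm.ED25519 == 15
assert DSDigest.SHA256 == 2
assert NSEC3Hash.SHA1 == 1
```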
diff --git a/spaces/jonathang/WeatherBoy/app.py b/spaces/jonathang/WeatherBoy/app.py
deleted file mode 100644
index 2967c7d50b0bda649c3814c831e54181893560df..0000000000000000000000000000000000000000
--- a/spaces/jonathang/WeatherBoy/app.py
+++ /dev/null
@@ -1,339 +0,0 @@
-import requests
-import structlog
-import openai
-import os
-import io
-import random
-import tiktoken
-import enum
-import time
-import retrying
-import IPython.display as display
-from base64 import b64decode
-import base64
-from io import BytesIO
-import PIL
-import PIL.Image
-import PIL.ImageDraw
-import PIL.ImageFont
-import gradio as gr
-import cachetools.func
-from huggingface_hub import hf_hub_download
-import concurrent.futures
-import geopy
-
-
-logger = structlog.getLogger()
-weather_api_key = os.environ['WEATHER_API']
-openai.api_key = os.environ.get("OPENAI_KEY", None)
-
-animals = [x.strip() for x in open('animals.txt').readlines()]
-art_styles = [x.strip() for x in open('art_styles.txt').readlines()]
-font_path = hf_hub_download("jonathang/fonts-ttf", "Vogue.ttf")
-other_font_path = hf_hub_download("ybelkada/fonts", "Arial.TTF")
-
-
-@cachetools.cached(cache={})
-def get_lat_long_gmaps(zip):
- api_key = os.environ["GMAPS_API"]
- url = f"https://maps.googleapis.com/maps/api/geocode/json?address={zip}&key={api_key}"
- resp = requests.get(url).json()
- latlng = resp['results'][0]['geometry']['location']
- return latlng['lat'], latlng['lng']
-
-@cachetools.cached(cache={})
-def get_lat_long(zip):
- try:
- loc = geopy.Nominatim(user_agent='weatherboy-gpt').geocode(str(zip))
- return loc.latitude, loc.longitude
- except:
- return get_lat_long_gmaps(zip)
-
-
-class Chat:
- class Model(enum.Enum):
- GPT3_5 = "gpt-3.5-turbo"
- GPT_4 = "gpt-4"
-
- def __init__(self, system, max_length=4096//2):
- self._system = system
- self._max_length = max_length
- self._history = [
- {"role": "system", "content": self._system},
- ]
-
- @classmethod
- def num_tokens_from_text(cls, text, model="gpt-3.5-turbo"):
- """Returns the number of tokens used by some text."""
- encoding = tiktoken.encoding_for_model(model)
- return len(encoding.encode(text))
-
- @classmethod
- def num_tokens_from_messages(cls, messages, model="gpt-3.5-turbo"):
- """Returns the number of tokens used by a list of messages."""
- encoding = tiktoken.encoding_for_model(model)
- num_tokens = 0
- for message in messages:
- num_tokens += 4 # every message follows {role/name}\n{content}\n
- for key, value in message.items():
- num_tokens += len(encoding.encode(value))
- if key == "name": # if there's a name, the role is omitted
- num_tokens += -1 # role is always required and always 1 token
- num_tokens += 2 # every reply is primed with assistant
- return num_tokens
-
- @retrying.retry(stop_max_attempt_number=5, wait_fixed=2000)
- def _msg(self, *args, model=Model.GPT3_5.value, **kwargs):
- return openai.ChatCompletion.create(
- *args,
- model=model,
- messages=self._history,
- **kwargs
- )
-
- def message(self, next_msg=None, **kwargs):
- # TODO: Optimize this if slow through easy caching
- while len(self._history) > 1 and self.num_tokens_from_messages(self._history) > self._max_length:
- logger.info(f'Popping message: {self._history.pop(1)}')
- if next_msg is not None:
- self._history.append({"role": "user", "content": next_msg})
- logger.info('requesting openai...')
- resp = self._msg(**kwargs)
- logger.info('received openai...')
- text = resp.choices[0].message.content
- self._history.append({"role": "assistant", "content": text})
- return text
-
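
A hedged usage sketch of the `Chat` wrapper above (it assumes `OPENAI_KEY` is set and relies on the pre-1.0 `openai` client this file imports):

```python
# The history is trimmed automatically once it exceeds max_length tokens,
# and each call appends both the user turn and the assistant reply.
chat = Chat("You are a terse weather assistant.")
summary = chat.message("Summarize: 72F, clear skies, light wind.")
haiku = chat.message("Now phrase it as a haiku.")  # earlier turns stay in the history
```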
-class Weather:
- def __init__(self, zip_code='10001', api_key=weather_api_key):
- self.zip_code = zip_code
- self.api_key = api_key
-
- @cachetools.func.ttl_cache(maxsize=128, ttl=15*60)
- def get_weather(self):
- lat, long = get_lat_long(self.zip_code)
- url = f"https://forecast.weather.gov/MapClick.php?lat={lat:.2f}&lon={long:.2f}&unit=0&lg=english&FcstType=json"
- headers = {'accept': 'application/json'}
- return requests.get(url, headers=headers).json()
-
- def get_info(self):
- data = self.get_weather()
- new_data = {}
- new_data['now'] = data['currentobservation']
-
- # The 'time' and 'data' keys seem to have hourly/daily data
- # Assuming the first entry in these lists is for the current hour
- new_data['hour'] = {
- 'time': data['time']['startValidTime'][0],
- 'tempLabel': data['time']['tempLabel'][0],
- 'temperature': data['data']['temperature'][0],
- 'pop': data['data']['pop'][0],
- 'weather': data['data']['weather'][0],
- 'iconLink': data['data']['iconLink'][0],
- 'text': data['data']['text'][0],
- }
-
- # And the rest of the 'time' and 'data' lists are for the rest of the day
- new_data['day'] = {
- 'time': data['time']['startValidTime'][1:],
- 'tempLabel': data['time']['tempLabel'][1:],
- 'temperature': data['data']['temperature'][1:],
- 'pop': data['data']['pop'][1:],
- 'weather': data['data']['weather'][1:],
- 'iconLink': data['data']['iconLink'][1:],
- 'text': data['data']['text'][1:],
- }
-
- return new_data
-
-
-class Image:
- class Size(enum.Enum):
- SMALL = "256x256"
- MEDIUM = "512x512"
- LARGE = "1024x1024"
-
- @classmethod
- @retrying.retry(stop_max_attempt_number=5, wait_fixed=2000)
- def create(cls, prompt, n=1, size=Size.SMALL):
- logger.info('requesting openai.Image...')
- resp = openai.Image.create(prompt=prompt, n=n, size=size.value, response_format='b64_json')
- logger.info('received openai.Image...')
- if n == 1: return resp["data"][0]
- return resp["data"]
-
-def create_collage(image1, image2, image3, image4):
- # assuming images are the same size
- width, height = image1.size
-
- new_img = PIL.Image.new('RGB', (2 * width, 2 * height))
-
- # place images in collage image
- new_img.paste(image1, (0,0))
- new_img.paste(image2, (width, 0))
- new_img.paste(image3, (0, height))
- new_img.paste(image4, (width, height))
-
- return new_img
-
-def overlay_text_on_image(img, text, position, text_color=(255, 255, 255), box_color=(0, 0, 0, 128), decode=False):
- # Convert the base64 string back to an image
- if decode:
- img_bytes = base64.b64decode(img)
- img = PIL.Image.open(BytesIO(img_bytes))
-
- # Get image dimensions
- img_width, img_height = img.size
-
- # Create a ImageDraw object
- draw = PIL.ImageDraw.Draw(img)
-
- # Reduce the font size until it fits the image width or height
- l, r = 1, 50
- while l < r:
- font_size = (l + r) // 2
- font = PIL.ImageFont.truetype(font_path, font_size)
- left, upper, right, lower = draw.textbbox((0, 0), text, font=font)
- text_width = right - left
- text_height = lower - upper
- if text_width <= img_width and text_height <= img_height:
- l = font_size + 1
- else:
- r = font_size - 1
- font_size = max(l-1, 1)
-
- left, upper, right, lower = draw.textbbox((0, 0), text, font=font)
- text_width = right - left
- text_height = lower - upper
-
- if position == 'top-left':
- x, y = 0, 0
- elif position == 'top-right':
- x, y = img_width - text_width, 0
- elif position == 'bottom-left':
- x, y = 0, img_height - text_height
- elif position == 'bottom-right':
- x, y = img_width - text_width, img_height - text_height
- else:
- raise ValueError("Position should be 'top-left', 'top-right', 'bottom-left' or 'bottom-right'.")
-
- # Draw a semi-transparent box around the text
- draw.rectangle([x, y, x + text_width, y + text_height], fill=box_color)
-
- # Draw the text on the image
- draw.text((x, y), text, font=font, fill=text_color)
-
- return img
-
-
-class WeatherDraw:
- def clean_text(self, weather_info):
- chat = Chat("Given the following weather conditions, write a very small, concise plaintext summary that will overlay on top of an image.")
- text = chat.message(str(weather_info))
- return text
-
- def generate_image(self, weather_info, **kwargs):
- animal = random.choice(animals)
- logger.info(f"Got animal {animal}")
- chat = Chat(f'''Given
-the following weather conditions, write a plaintext, short, and vivid description of an
-image of an adorable anthropomorphised {animal} doing an activity in the weather.
-The image should make obvious what the weather is.
-The animal should be extremely anthropomorphised.
-Only write the short description and nothing else.
-Do not include specific numbers.'''.replace('\n', ' '))
- description = chat.message(str(weather_info))
- hd_modifiers = """3840x2160
-8k 3D / 16k 3D
-8k resolution / 16k resolution
-Detailed
-Ultra HD
-Ultrafine detail
-""".split('\n')
- prompt = f'{random.choice(art_styles)} of {description} {random.choice(hd_modifiers)}'
- logger.info(prompt)
- img = Image.create(prompt, **kwargs)
- return img["b64_json"], prompt
-
- def step_one_forecast(self, weather_info, **kwargs):
- img, txt = self.generate_image(weather_info, **kwargs)
- # text = self.clean_text(weather_info)
- # return overlay_text_on_image(img, text, 'bottom-left')
- return img, txt
-
- def weather_img(self, weather_data):
- import io
- # Create a new image with white background
- image = PIL.Image.new('RGB', (256, 256), (255, 255, 255))
- draw = PIL.ImageDraw.Draw(image)
-
- # Load a font
- font = PIL.ImageFont.truetype(other_font_path, 12)
-
- # Draw text on the image
- y_text = 5
- items_to_display = {
- 'now': {'Temperature': weather_data['now']['Temp'],
- 'Condition': weather_data['now']['Weather'],},
- 'hour': {'Temperature': weather_data['hour']['temperature'],
- 'Condition': weather_data['hour']['weather']},
- 'day': {'High': int(max(float(t) for t in weather_data['day']['temperature'])),
- 'Low': int(min(float(t) for t in weather_data['day']['temperature'])),
- 'Condition': weather_data['day']['weather'][0]},
- }
-
- for category, values in items_to_display.items():
- draw.text((5, y_text), category, font=font, fill=(0, 0, 0))
- y_text += 15
- for key, value in values.items():
- text = f"{key}: {value}"
- draw.text((10, y_text), text, font=font, fill=(0, 0, 0))
- y_text += 15
-
- # Download the weather condition icon for now, day and next hour
- for index, time in enumerate(items_to_display.keys()):
- if time == 'day':
- icon_url = weather_data['day']['iconLink'][0]
- elif time == 'now':
- icon_url = 'https://forecast.weather.gov/newimages/medium/'+weather_data['now']['Weatherimage']
- else:
- icon_url = weather_data[time]['iconLink']
- try:
- response = requests.get(icon_url)
- icon = PIL.Image.open(io.BytesIO(response.content))
- except:
- print(time, icon_url)
- continue
- # Resize the icon
- icon = icon.resize((60, 60))
- # Paste the icon on the image
- image.paste(icon, (index*70 + 10, 190))
-
- return image
-
- def step(self, zip_code='10001', **kwargs):
- forecast = Weather(zip_code).get_info()
- images, texts = [], []
- with concurrent.futures.ThreadPoolExecutor(max_workers=4) as e:
- runs = {}
- for time, data in forecast.items():
- if time == 'etc': continue
- runs[e.submit(self.step_one_forecast, data, **kwargs)] = time, data
- for r in concurrent.futures.as_completed(runs.keys()):
- img, txt = r.result()
- time, data = runs[r]
- images.append(overlay_text_on_image(img, time, 'top-right', decode=True))
- # images.append(overlay_text_on_image(img, '', 'top-right', decode=True))
- texts.append(txt)
- return create_collage(*images, self.weather_img(forecast)), *texts, str(forecast)
-
-
-# Define Gradio interface
-iface = gr.Interface(fn=WeatherDraw().step,
- inputs=gr.inputs.Textbox(label="Enter Zipcode"),
- outputs=[gr.outputs.Image(type='pil'), "text", "text", "text", "text"],
- title="US Zipcode Weather",
- description="Enter a US Zipcode and get some weather.")
-
-# Run the interface
-iface.launch()
\ No newline at end of file
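
For completeness, a hedged sketch of driving `WeatherDraw` without the Gradio UI; the zip code is a placeholder and the weather/OpenAI API keys still need to be configured in the environment:

```python
collage, *captions, raw_forecast = WeatherDraw().step("10001")
collage.save("weather_collage.png")  # the collage is a PIL.Image
print(captions)                      # one generated prompt per forecast window
```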
diff --git a/spaces/jordonpeter01/MusicGen2/audiocraft/models/loaders.py b/spaces/jordonpeter01/MusicGen2/audiocraft/models/loaders.py
deleted file mode 100644
index 97c662c3212b7695669cbfc5214ff2f099c3f319..0000000000000000000000000000000000000000
--- a/spaces/jordonpeter01/MusicGen2/audiocraft/models/loaders.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Utility functions to load from the checkpoints.
-Each checkpoint is a torch.saved dict with the following keys:
-- 'xp.cfg': the hydra config as dumped during training. This should be used
- to rebuild the object using the audiocraft.models.builders functions,
-- 'model_best_state': a readily loadable best state for the model, including
- the conditioner. The model obtained from `xp.cfg` should be compatible
-  with this state dict. In the case of an LM, the encodec model would not be
-  bundled along but instead provided separately.
-
-Those functions also support loading from a remote location with the Torch Hub API.
-They also support overriding some parameters, in particular the device and dtype
-of the returned model.
-"""
-
-from pathlib import Path
-from huggingface_hub import hf_hub_download
-import typing as tp
-import os
-
-from omegaconf import OmegaConf
-import torch
-
-from . import builders
-
-
-HF_MODEL_CHECKPOINTS_MAP = {
- "small": "facebook/musicgen-small",
- "medium": "facebook/musicgen-medium",
- "large": "facebook/musicgen-large",
- "melody": "facebook/musicgen-melody",
-}
-
-
-def _get_state_dict(
- file_or_url_or_id: tp.Union[Path, str],
- filename: tp.Optional[str] = None,
- device='cpu',
- cache_dir: tp.Optional[str] = None,
-):
- # Return the state dict either from a file or url
- file_or_url_or_id = str(file_or_url_or_id)
- assert isinstance(file_or_url_or_id, str)
-
- if os.path.isfile(file_or_url_or_id):
- return torch.load(file_or_url_or_id, map_location=device)
-
- if os.path.isdir(file_or_url_or_id):
- file = f"{file_or_url_or_id}/{filename}"
- return torch.load(file, map_location=device)
-
- elif file_or_url_or_id.startswith('https://'):
- return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True)
-
- elif file_or_url_or_id in HF_MODEL_CHECKPOINTS_MAP:
- assert filename is not None, "filename needs to be defined if using HF checkpoints"
-
- repo_id = HF_MODEL_CHECKPOINTS_MAP[file_or_url_or_id]
- file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=cache_dir)
- return torch.load(file, map_location=device)
-
- else:
- raise ValueError(f"{file_or_url_or_id} is not a valid name, path or link that can be loaded.")
-
-
-def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
- pkg = _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir)
- cfg = OmegaConf.create(pkg['xp.cfg'])
- cfg.device = str(device)
- model = builders.get_compression_model(cfg)
- model.load_state_dict(pkg['best_state'])
- model.eval()
- return model
-
-
-def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
- pkg = _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir)
- cfg = OmegaConf.create(pkg['xp.cfg'])
- cfg.device = str(device)
- if cfg.device == 'cpu':
- cfg.dtype = 'float32'
- else:
- cfg.dtype = 'float16'
- model = builders.get_lm_model(cfg)
- model.load_state_dict(pkg['best_state'])
- model.eval()
- model.cfg = cfg
- return model
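
A hedged sketch of how the two loaders above are typically combined; `"small"` is one of the keys in `HF_MODEL_CHECKPOINTS_MAP`, and the checkpoints are downloaded from the Hub on first use:

```python
device = "cuda"  # or "cpu"; on CPU the LM loader switches dtype to float32
compression_model = load_compression_model("small", device=device)
lm = load_lm_model("small", device=device)
```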
diff --git a/spaces/jspr/paperchat/ingest_faiss.py b/spaces/jspr/paperchat/ingest_faiss.py
deleted file mode 100644
index 696d9eab238d1f37acd0c3bd7c5c312b792d9328..0000000000000000000000000000000000000000
--- a/spaces/jspr/paperchat/ingest_faiss.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from pathlib import Path
-from langchain.text_splitter import CharacterTextSplitter
-import faiss
-from langchain.vectorstores import FAISS
-from langchain.embeddings import OpenAIEmbeddings
-import pickle
-
-def create_vector_store(suffix, paper_text):
- # with open('paper-dir/main.txt') as f:
- # paper_text = f.read()
-
- split_chars = ["§", "§.§"]
- data = []
- for c in split_chars:
- paper_text = paper_text.replace(c, "§")
- data = paper_text.split("§")
-
- # metadatas is the rest of the text on the same line as the section symbol
- sources = []
- for d in data:
- sources.append(d.split("\n")[0].strip())
- # data = [d.split("\n")[1:] for d in data]
-
- sources[0] = "Beginning of paper"
-
- # Here we split the documents, as needed, into smaller chunks.
- # We do this due to the context limits of the LLMs.
- text_splitter = CharacterTextSplitter(chunk_size=1500, separator="\n")
- docs = []
- metadatas = []
- for i, d in enumerate(data):
- splits = text_splitter.split_text(d)
- docs.extend(splits)
- metadatas.extend([{"source": sources[i]}] * len(splits))
-
- # Here we create a vector store from the documents and save it to disk.
- store = FAISS.from_texts(docs, OpenAIEmbeddings(), metadatas=metadatas)
- faiss.write_index(store.index, f"{suffix}/docs.index")
- store.index = None
- with open(f"{suffix}/faiss_store.pkl", "wb") as f:
- pickle.dump(store, f)
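
A hedged counterpart to `create_vector_store`: because the FAISS index is written separately and `store.index` is set to `None` before pickling, loading requires reattaching the index. The directory name and query below are placeholders:

```python
import pickle

import faiss


def load_vector_store(suffix):
    store_index = faiss.read_index(f"{suffix}/docs.index")
    with open(f"{suffix}/faiss_store.pkl", "rb") as f:
        store = pickle.load(f)
    store.index = store_index  # reattach the index stripped before pickling
    return store


store = load_vector_store("my-paper")
hits = store.similarity_search("What is the main contribution?", k=4)
```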
diff --git a/spaces/jsu27/decomp-diffusion/upsampling.py b/spaces/jsu27/decomp-diffusion/upsampling.py
deleted file mode 100644
index 1ca3d631743b0f04111bd0ca0ef62757449b650f..0000000000000000000000000000000000000000
--- a/spaces/jsu27/decomp-diffusion/upsampling.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import torch as th
-from diffusers import IFImg2ImgSuperResolutionPipeline
-from transformers import T5EncoderModel
-from PIL import Image
-import numpy as np
-
-def get_pipeline():
-
- text_encoder = T5EncoderModel.from_pretrained(
- "DeepFloyd/IF-I-XL-v1.0",
- subfolder="text_encoder",
- device_map="auto",
- load_in_8bit=True,
- variant="8bit"
- )
-
- pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained(
- "DeepFloyd/IF-II-L-v1.0",
- text_encoder=text_encoder,
- variant="fp16",
- torch_dtype=th.float16,
- device_map="auto",
- watermarker=None
- )
- return pipe
-
-
-def upscale_image(im, pipe):
- """im is 64x64 PIL image"""
- prompt = ''
- prompt_embeds, negative_embeds = pipe.encode_prompt(prompt)
- generator = th.Generator().manual_seed(0)
-
- image = pipe(
-        image=im,             # the function receives `im`; `original_image` was undefined
-        original_image=im,
- prompt_embeds=prompt_embeds,
- negative_prompt_embeds=negative_embeds,
- generator=generator,
- ).images[0]
-
- return image
\ No newline at end of file
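
A hedged sketch chaining the two helpers above; the image paths are placeholders:

```python
from PIL import Image

pipe = get_pipeline()                # loads the text encoder and IF stage II once
small = Image.open("sample_64.png")  # placeholder path to a 64x64 image
large = upscale_image(small, pipe)
large.save("sample_256.png")
```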
diff --git a/spaces/juliensimon/table_questions/README.md b/spaces/juliensimon/table_questions/README.md
deleted file mode 100644
index 4b3dc97d45106be363b6996c00ad8893cebb17a0..0000000000000000000000000000000000000000
--- a/spaces/juliensimon/table_questions/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Table Questions
-emoji: 🐠
-colorFrom: red
-colorTo: red
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/justest/gpt4free/g4f/.v1/gpt4free/usesless/__init__.py b/spaces/justest/gpt4free/g4f/.v1/gpt4free/usesless/__init__.py
deleted file mode 100644
index 00f7f75d0e33d106a514a98fdb20234fbf80d6c2..0000000000000000000000000000000000000000
--- a/spaces/justest/gpt4free/g4f/.v1/gpt4free/usesless/__init__.py
+++ /dev/null
@@ -1,158 +0,0 @@
-import string
-import time
-import re
-import json
-import requests
-import fake_useragent
-import random
-from password_generator import PasswordGenerator
-
-from .utils import create_email, check_email
-
-
-class Account:
- @staticmethod
- def create(logging: bool = False):
- is_custom_domain = input(
- "Do you want to use your custom domain name for temporary email? [Y/n]: "
- ).upper()
-
- if is_custom_domain == "Y":
- mail_address = create_email(custom_domain=True, logging=logging)
- elif is_custom_domain == "N":
- mail_address = create_email(custom_domain=False, logging=logging)
- else:
- print("Please, enter either Y or N")
- return
-
- name = string.ascii_lowercase + string.digits
- username = "".join(random.choice(name) for i in range(20))
-
- pwo = PasswordGenerator()
- pwo.minlen = 8
- password = pwo.generate()
-
- session = requests.Session()
-
- register_url = "https://ai.usesless.com/api/cms/auth/local/register"
- register_json = {
- "username": username,
- "password": password,
- "email": mail_address,
- }
- headers = {
- "authority": "ai.usesless.com",
- "accept": "application/json, text/plain, */*",
- "accept-language": "en-US,en;q=0.5",
- "cache-control": "no-cache",
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": fake_useragent.UserAgent().random,
- }
- register = session.post(register_url, json=register_json, headers=headers)
- if logging:
- if register.status_code == 200:
- print("Registered successfully")
- else:
- print(register.status_code)
- print(register.json())
- print("There was a problem with account registration, try again")
-
- if register.status_code != 200:
- quit()
-
- while True:
- time.sleep(5)
- messages = check_email(mail=mail_address, logging=logging)
-
- # Check if method `message_list()` didn't return None or empty list.
- if not messages or len(messages) == 0:
- # If it returned None or empty list sleep for 5 seconds to wait for new message.
- continue
-
- message_text = messages[0]["content"]
- verification_url = re.findall(
- r"http:\/\/ai\.usesless\.com\/api\/cms\/auth\/email-confirmation\?confirmation=\w.+\w\w",
- message_text,
- )[0]
- if verification_url:
- break
-
- session.get(verification_url)
- login_json = {"identifier": mail_address, "password": password}
- login_request = session.post(
- url="https://ai.usesless.com/api/cms/auth/local", json=login_json
- )
-
- token = login_request.json()["jwt"]
- if logging and token:
- print(f"Token: {token}")
-
- with open("account.json", "w") as file:
- json.dump({"email": mail_address, "token": token}, file)
- if logging:
- print(
-                "\nNew account credentials have been successfully saved in the 'account.json' file"
- )
-
- return token
-
-
-class Completion:
- @staticmethod
- def create(
- token: str,
- systemMessage: str = "You are a helpful assistant",
- prompt: str = "",
- parentMessageId: str = "",
- presence_penalty: float = 1,
- temperature: float = 1,
- model: str = "gpt-3.5-turbo",
- ):
- headers = {
- "authority": "ai.usesless.com",
- "accept": "application/json, text/plain, */*",
- "accept-language": "en-US,en;q=0.5",
- "cache-control": "no-cache",
- "sec-fetch-dest": "empty",
- "sec-fetch-mode": "cors",
- "sec-fetch-site": "same-origin",
- "user-agent": fake_useragent.UserAgent().random,
- "Authorization": f"Bearer {token}",
- }
-
- json_data = {
- "openaiKey": "",
- "prompt": prompt,
- "options": {
- "parentMessageId": parentMessageId,
- "systemMessage": systemMessage,
- "completionParams": {
- "presence_penalty": presence_penalty,
- "temperature": temperature,
- "model": model,
- },
- },
- }
-
- url = "https://ai.usesless.com/api/chat-process"
- request = requests.post(url, headers=headers, json=json_data)
- request.encoding = request.apparent_encoding
- content = request.content
-
- response = Completion.__response_to_json(content)
- return response
-
-    @classmethod
-    def __response_to_json(cls, text):
- text = str(text.decode("utf-8"))
-
- split_text = text.rsplit("\n", 1)
- if len(split_text) > 1:
- to_json = json.loads(split_text[1])
- return to_json
- else:
- return None
-
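
A hedged sketch of the intended flow, creating an account once and reusing the token for completions (it relies on the temporary-mail helpers imported above and on the service being reachable):

```python
from gpt4free import usesless

token = usesless.Account.create(logging=True)
reply = usesless.Completion.create(token=token, prompt="Hello, who are you?")
print(reply)
```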
diff --git a/spaces/jytole/hftesting/README.md b/spaces/jytole/hftesting/README.md
deleted file mode 100644
index ff80aead3e36c02b330c89e789e54fc1ff17319b..0000000000000000000000000000000000000000
--- a/spaces/jytole/hftesting/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Hftesting
-emoji: 🐨
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.34.0
-app_file: app.py
-pinned: false
-license: cc
-python_version: 3.9.13
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kadirnar/classifyhub/README.md b/spaces/kadirnar/classifyhub/README.md
deleted file mode 100644
index f383aa918ae0ffb44a020f6b00322b94108f5476..0000000000000000000000000000000000000000
--- a/spaces/kadirnar/classifyhub/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: Classifyhub
-emoji: 🌍
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: gpl-3.0
-tags:
-- making-demos
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/kevinwang676/VoiceChanger/src/face3d/util/__init__.py b/spaces/kevinwang676/VoiceChanger/src/face3d/util/__init__.py
deleted file mode 100644
index 04eecb58b62f8c9d11d17606c6241d278a48b9b9..0000000000000000000000000000000000000000
--- a/spaces/kevinwang676/VoiceChanger/src/face3d/util/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""This package includes a miscellaneous collection of useful helper functions."""
-from src.face3d.util import *
-
diff --git a/spaces/kosurisiva/MyGenAiChatBot/README.md b/spaces/kosurisiva/MyGenAiChatBot/README.md
deleted file mode 100644
index 1e4a67438dd921cb10346fd946f0073c3469de48..0000000000000000000000000000000000000000
--- a/spaces/kosurisiva/MyGenAiChatBot/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: MyGenAiChatBot
-emoji: 📊
-colorFrom: blue
-colorTo: red
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/dim/wrap_type.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/dim/wrap_type.py
deleted file mode 100644
index 8212836d3d6ae71f84dcb161b60bd513fe35b571..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/functorch/dim/wrap_type.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-from types import FunctionType, BuiltinMethodType, MethodDescriptorType, WrapperDescriptorType, GetSetDescriptorType
-from functorch._C import dim as _C
-_wrap_method = _C._wrap_method
-
-FUNC_TYPES = (FunctionType, MethodDescriptorType, BuiltinMethodType, WrapperDescriptorType)
-PROPERTY_TYPES = (GetSetDescriptorType, property)
-
-def _py_wrap_method(orig, __torch_function__):
- def impl(*args, **kwargs):
- return __torch_function__(orig, None, args, kwargs)
- return impl
-
-
-def wrap_type(use_c, to_patch, pattern, __torch_function__):
-    if use_c:
- wrap_method = _wrap_method
- else:
- wrap_method = _py_wrap_method
-
- all = {}
- for t in reversed(pattern.mro()[:-1]): # skip object
- all.update(t.__dict__)
-
-
- def wrap_attr(orig):
- return property(wrap_method(orig.__get__, __torch_function__))
-
-
- for name, obj in all.items():
- if name in ('__dict__', '__new__', '__init__', '__repr__', '__weakref__', '__doc__', '__module__', '__dir__'):
- continue
-
- # skip things that have been overloaded
- # things that come from object like `__eq__` still need to be patched, however.
- if hasattr(to_patch, name) and getattr(to_patch, name) is not getattr(object, name, None):
- continue
-
- if isinstance(obj, FUNC_TYPES):
- setattr(to_patch, name, wrap_method(obj, __torch_function__))
- elif isinstance(obj, PROPERTY_TYPES):
- setattr(to_patch, name, wrap_attr(obj))
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/_webhooks_server.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/_webhooks_server.py
deleted file mode 100644
index 7cc5dd4ce7769fee10e0198cffe79f64a33b211d..0000000000000000000000000000000000000000
--- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/huggingface_hub/_webhooks_server.py
+++ /dev/null
@@ -1,369 +0,0 @@
-# coding=utf-8
-# Copyright 2023-present, the HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Contains `WebhooksServer` and `webhook_endpoint` to create a webhook server easily."""
-import atexit
-import inspect
-import os
-from functools import wraps
-from typing import TYPE_CHECKING, Callable, Dict, Optional
-
-from .utils import experimental, is_gradio_available
-
-
-if TYPE_CHECKING:
- import gradio as gr
-
-
-from fastapi import FastAPI, Request
-from fastapi.responses import JSONResponse
-
-
-_global_app: Optional["WebhooksServer"] = None
-_is_local = os.getenv("SYSTEM") != "spaces"
-
-
-@experimental
-class WebhooksServer:
- """
- The [`WebhooksServer`] class lets you create an instance of a Gradio app that can receive Huggingface webhooks.
-    These webhooks can be registered using the [`~WebhooksServer.add_webhook`] decorator. Webhook endpoints are added to
-    the app as POST endpoints on the FastAPI router. Once all the webhooks are registered, the `run` method has to be
- called to start the app.
-
- It is recommended to accept [`WebhookPayload`] as the first argument of the webhook function. It is a Pydantic
- model that contains all the information about the webhook event. The data will be parsed automatically for you.
-
-    Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to set up your
- WebhooksServer and deploy it on a Space.
-
-    `WebhooksServer` is experimental. Its API is subject to change in the future.
-
-    You must have `gradio` installed to use `WebhooksServer` (`pip install --upgrade gradio`).
-
- Args:
- ui (`gradio.Blocks`, optional):
- A Gradio UI instance to be used as the Space landing page. If `None`, a UI displaying instructions
- about the configured webhooks is created.
- webhook_secret (`str`, optional):
- A secret key to verify incoming webhook requests. You can set this value to any secret you want as long as
- you also configure it in your [webhooks settings panel](https://huggingface.co/settings/webhooks). You
- can also set this value as the `WEBHOOK_SECRET` environment variable. If no secret is provided, the
- webhook endpoints are opened without any security.
-
- Example:
-
- ```python
- import gradio as gr
- from huggingface_hub import WebhooksServer, WebhookPayload
-
- with gr.Blocks() as ui:
- ...
-
- app = WebhooksServer(ui=ui, webhook_secret="my_secret_key")
-
- @app.add_webhook("/say_hello")
- async def hello(payload: WebhookPayload):
- return {"message": "hello"}
-
- app.run()
- ```
- """
-
- def __new__(cls, *args, **kwargs) -> "WebhooksServer":
- if not is_gradio_available():
- raise ImportError(
- "You must have `gradio` installed to use `WebhooksServer`. Please run `pip install --upgrade gradio`"
- " first."
- )
- return super().__new__(cls)
-
- def __init__(
- self,
- ui: Optional["gr.Blocks"] = None,
- webhook_secret: Optional[str] = None,
- ) -> None:
- self._ui = ui
-
- self.webhook_secret = webhook_secret or os.getenv("WEBHOOK_SECRET")
- self.registered_webhooks: Dict[str, Callable] = {}
- _warn_on_empty_secret(self.webhook_secret)
-
- def add_webhook(self, path: Optional[str] = None) -> Callable:
- """
- Decorator to add a webhook to the [`WebhooksServer`] server.
-
- Args:
- path (`str`, optional):
- The URL path to register the webhook function. If not provided, the function name will be used as the
- path. In any case, all webhooks are registered under `/webhooks`.
-
- Raises:
- ValueError: If the provided path is already registered as a webhook.
-
- Example:
- ```python
- from huggingface_hub import WebhooksServer, WebhookPayload
-
- app = WebhooksServer()
-
- @app.add_webhook
- async def trigger_training(payload: WebhookPayload):
- if payload.repo.type == "dataset" and payload.event.action == "update":
- # Trigger a training job if a dataset is updated
- ...
-
- app.run()
- ```
- """
- # Usage: directly as decorator. Example: `@app.add_webhook`
- if callable(path):
- # If path is a function, it means it was used as a decorator without arguments
- return self.add_webhook()(path)
-
- # Usage: provide a path. Example: `@app.add_webhook(...)`
- @wraps(FastAPI.post)
- def _inner_post(*args, **kwargs):
- func = args[0]
- abs_path = f"/webhooks/{(path or func.__name__).strip('/')}"
- if abs_path in self.registered_webhooks:
- raise ValueError(f"Webhook {abs_path} already exists.")
- self.registered_webhooks[abs_path] = func
-
- return _inner_post
-
- def run(self) -> None:
- """Starts the Gradio app with the FastAPI server and registers the webhooks."""
- ui = self._ui or self._get_default_ui()
-
- # Start Gradio App
- # - as non-blocking so that webhooks can be added afterwards
- # - as shared if launch locally (to debug webhooks)
- self.fastapi_app, _, _ = ui.launch(prevent_thread_lock=True, share=_is_local)
-
- # Register webhooks to FastAPI app
- for path, func in self.registered_webhooks.items():
- # Add secret check if required
- if self.webhook_secret is not None:
- func = _wrap_webhook_to_check_secret(func, webhook_secret=self.webhook_secret)
-
- # Add route to FastAPI app
- self.fastapi_app.post(path)(func)
-
- # Print instructions and block main thread
- url = (ui.share_url or ui.local_url).strip("/")
-        message = "\nWebhooks are correctly set up and ready to use:"
-        message += "\n" + "\n".join(f" - POST {url}{webhook}" for webhook in self.registered_webhooks)
-        message += "\nGo to https://huggingface.co/settings/webhooks to set up your webhooks."
- print(message)
-
- ui.block_thread()
-
- def _get_default_ui(self) -> "gr.Blocks":
- """Default UI if not provided (lists webhooks and provides basic instructions)."""
- import gradio as gr
-
- with gr.Blocks() as ui:
- gr.Markdown("# This is an app to process 🤗 Webhooks")
- gr.Markdown(
- "Webhooks are a foundation for MLOps-related features. They allow you to listen for new changes on"
- " specific repos or to all repos belonging to particular set of users/organizations (not just your"
- " repos, but any repo). Check out this [guide](https://huggingface.co/docs/hub/webhooks) to get to"
- " know more about webhooks on the Huggingface Hub."
- )
- gr.Markdown(
- f"{len(self.registered_webhooks)} webhook(s) are registered:"
- + "\n\n"
- + "\n ".join(
- f"- [{webhook_path}]({_get_webhook_doc_url(webhook.__name__, webhook_path)})"
- for webhook_path, webhook in self.registered_webhooks.items()
- )
- )
- gr.Markdown(
- "Go to https://huggingface.co/settings/webhooks to setup your webhooks."
- + "\nYou app is running locally. Please look at the logs to check the full URL you need to set."
- if _is_local
- else (
- "\nThis app is running on a Space. You can find the corresponding URL in the options menu"
- " (top-right) > 'Embed the Space'. The URL looks like 'https://{username}-{repo_name}.hf.space'."
- )
- )
- return ui
-
-
-@experimental
-def webhook_endpoint(path: Optional[str] = None) -> Callable:
- """Decorator to start a [`WebhooksServer`] and register the decorated function as a webhook endpoint.
-
- This is a helper to get started quickly. If you need more flexibility (custom landing page or webhook secret),
- you can use [`WebhooksServer`] directly. You can register multiple webhook endpoints (to the same server) by using
- this decorator multiple times.
-
-    Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to set up your
- server and deploy it on a Space.
-
-    <Tip warning={true}>
-
-    `webhook_endpoint` is experimental. Its API is subject to change in the future.
-
-    </Tip>
-
-    <Tip warning={true}>
-
-    You must have `gradio` installed to use `webhook_endpoint` (`pip install --upgrade gradio`).
-
-    </Tip>
-
- Args:
- path (`str`, optional):
- The URL path to register the webhook function. If not provided, the function name will be used as the path.
- In any case, all webhooks are registered under `/webhooks`.
-
- Examples:
- The default usage is to register a function as a webhook endpoint. The function name will be used as the path.
- The server will be started automatically at exit (i.e. at the end of the script).
-
- ```python
- from huggingface_hub import webhook_endpoint, WebhookPayload
-
- @webhook_endpoint
- async def trigger_training(payload: WebhookPayload):
- if payload.repo.type == "dataset" and payload.event.action == "update":
- # Trigger a training job if a dataset is updated
- ...
-
- # Server is automatically started at the end of the script.
- ```
-
- Advanced usage: register a function as a webhook endpoint and start the server manually. This is useful if you
- are running it in a notebook.
-
- ```python
- from huggingface_hub import webhook_endpoint, WebhookPayload
-
- @webhook_endpoint
- async def trigger_training(payload: WebhookPayload):
- if payload.repo.type == "dataset" and payload.event.action == "update":
- # Trigger a training job if a dataset is updated
- ...
-
- # Start the server manually
- trigger_training.run()
- ```
- """
- if callable(path):
- # If path is a function, it means it was used as a decorator without arguments
- return webhook_endpoint()(path)
-
- @wraps(WebhooksServer.add_webhook)
- def _inner(func: Callable) -> Callable:
- app = _get_global_app()
- app.add_webhook(path)(func)
- if len(app.registered_webhooks) == 1:
- # Register `app.run` to run at exit (only once)
- atexit.register(app.run)
-
- @wraps(app.run)
- def _run_now():
- # Run the app directly (without waiting atexit)
- atexit.unregister(app.run)
- app.run()
-
- func.run = _run_now # type: ignore
- return func
-
- return _inner
-
-
-def _get_global_app() -> WebhooksServer:
- global _global_app
- if _global_app is None:
- _global_app = WebhooksServer()
- return _global_app
-
-
-def _warn_on_empty_secret(webhook_secret: Optional[str]) -> None:
- if webhook_secret is None:
- print("Webhook secret is not defined. This means your webhook endpoints will be open to everyone.")
- print(
- "To add a secret, set `WEBHOOK_SECRET` as environment variable or pass it at initialization: "
- "\n\t`app = WebhooksServer(webhook_secret='my_secret', ...)`"
- )
- print(
- "For more details about webhook secrets, please refer to"
- " https://huggingface.co/docs/hub/webhooks#webhook-secret."
- )
- else:
- print("Webhook secret is correctly defined.")
-
-
-def _get_webhook_doc_url(webhook_name: str, webhook_path: str) -> str:
- """Returns the anchor to a given webhook in the docs (experimental)"""
- return "/docs#/default/" + webhook_name + webhook_path.replace("/", "_") + "_post"
-
-
-def _wrap_webhook_to_check_secret(func: Callable, webhook_secret: str) -> Callable:
- """Wraps a webhook function to check the webhook secret before calling the function.
-
- This is a hacky way to add the `request` parameter to the function signature. Since FastAPI based itself on route
- parameters to inject the values to the function, we need to hack the function signature to retrieve the `Request`
- object (and hence the headers). A far cleaner solution would be to use a middleware. However, since
- `fastapi==0.90.1`, a middleware cannot be added once the app has started. And since the FastAPI app is started by
- Gradio internals (and not by us), we cannot add a middleware.
-
- This method is called only when a secret has been defined by the user. If a request is sent without the
- "x-webhook-secret", the function will return a 401 error (unauthorized). If the header is sent but is incorrect,
- the function will return a 403 error (forbidden).
-
- Inspired by https://stackoverflow.com/a/33112180.
- """
- initial_sig = inspect.signature(func)
-
- @wraps(func)
- async def _protected_func(request: Request, **kwargs):
- request_secret = request.headers.get("x-webhook-secret")
- if request_secret is None:
- return JSONResponse({"error": "x-webhook-secret header not set."}, status_code=401)
- if request_secret != webhook_secret:
- return JSONResponse({"error": "Invalid webhook secret."}, status_code=403)
-
- # Inject `request` in kwargs if required
- if "request" in initial_sig.parameters:
- kwargs["request"] = request
-
- # Handle both sync and async routes
- if inspect.iscoroutinefunction(func):
- return await func(**kwargs)
- else:
- return func(**kwargs)
-
- # Update signature to include request
- if "request" not in initial_sig.parameters:
- _protected_func.__signature__ = initial_sig.replace( # type: ignore
- parameters=(
- inspect.Parameter(name="request", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request),
- )
- + tuple(initial_sig.parameters.values())
- )
-
- # Return protected route
- return _protected_func
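Editor's note: `_wrap_webhook_to_check_secret` above protects a route by rewriting its signature so FastAPI injects the `Request`. Below is a minimal standalone sketch of that signature-rewriting trick; `require_header` and its arguments are illustrative names, not part of `huggingface_hub`.

```python
import inspect
from functools import wraps


def require_header(func, header_name: str, expected: str):
    """Sketch of the signature-rewriting trick used above: expose a `request`
    parameter to the framework while leaving the wrapped function untouched."""
    initial_sig = inspect.signature(func)

    @wraps(func)
    def _protected(request, **kwargs):
        # Reject the call unless the expected header value is present.
        if request.headers.get(header_name) != expected:
            return {"error": "forbidden"}
        if "request" in initial_sig.parameters:
            kwargs["request"] = request
        return func(**kwargs)

    # Prepend `request` to the advertised signature so a framework that relies
    # on introspection (like FastAPI) knows to inject it.
    if "request" not in initial_sig.parameters:
        _protected.__signature__ = initial_sig.replace(
            parameters=(
                inspect.Parameter("request", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD),
            )
            + tuple(initial_sig.parameters.values())
        )
    return _protected


def greet(name: str):
    return {"message": f"hello {name}"}


protected = require_header(greet, "x-webhook-secret", "my_secret")
print(inspect.signature(protected))  # (request, name: str)
```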
diff --git a/spaces/leave7/kazunaAI2.0/add_speaker.py b/spaces/leave7/kazunaAI2.0/add_speaker.py
deleted file mode 100644
index e224f07c892a5fe1837e3cbf1745e0d8992ea283..0000000000000000000000000000000000000000
--- a/spaces/leave7/kazunaAI2.0/add_speaker.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import os
-import argparse
-from tqdm import tqdm
-from random import shuffle
-import json
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--train_list", type=str, default="./filelists/train.txt", help="path to train list")
- parser.add_argument("--val_list", type=str, default="./filelists/val.txt", help="path to val list")
- parser.add_argument("--test_list", type=str, default="./filelists/test.txt", help="path to test list")
- parser.add_argument("--source_dir", type=str, default="./dataset/32k", help="path to source dir")
- args = parser.parse_args()
-
- previous_config = json.load(open("configs/config.json", "rb"))
-
- train = []
- val = []
- test = []
- idx = 0
- spk_dict = previous_config["spk"]
- spk_id = max([i for i in spk_dict.values()]) + 1
- for speaker in tqdm(os.listdir(args.source_dir)):
- if speaker not in spk_dict.keys():
- spk_dict[speaker] = spk_id
- spk_id += 1
-        wavs = [os.path.join(args.source_dir, speaker, i) for i in os.listdir(os.path.join(args.source_dir, speaker))]
- wavs = [i for i in wavs if i.endswith("wav")]
- shuffle(wavs)
- train += wavs[2:-10]
- val += wavs[:2]
- test += wavs[-10:]
-
- assert previous_config["model"]["n_speakers"] > len(spk_dict.keys())
- shuffle(train)
- shuffle(val)
- shuffle(test)
-
- print("Writing", args.train_list)
- with open(args.train_list, "w") as f:
- for fname in tqdm(train):
- wavpath = fname
- f.write(wavpath + "\n")
-
- print("Writing", args.val_list)
- with open(args.val_list, "w") as f:
- for fname in tqdm(val):
- wavpath = fname
- f.write(wavpath + "\n")
-
- print("Writing", args.test_list)
- with open(args.test_list, "w") as f:
- for fname in tqdm(test):
- wavpath = fname
- f.write(wavpath + "\n")
-
- previous_config["spk"] = spk_dict
-
- print("Writing configs/config.json")
- with open("configs/config.json", "w") as f:
- json.dump(previous_config, f, indent=2)
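Editor's note: the script above splits each speaker's shuffled recordings into two validation files, ten test files and the rest for training. A small sketch of that split rule, assuming a speaker has more than twelve recordings:

```python
from random import shuffle


def split_speaker_wavs(wavs):
    """Mirror of the split above: 2 validation files, 10 test files, rest training."""
    wavs = list(wavs)
    shuffle(wavs)
    return {"train": wavs[2:-10], "val": wavs[:2], "test": wavs[-10:]}


splits = split_speaker_wavs(f"clip_{i:03d}.wav" for i in range(40))
print({k: len(v) for k, v in splits.items()})  # {'train': 28, 'val': 2, 'test': 10}
```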
diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Free Xats Generator V1.3 Download [TOP].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Free Xats Generator V1.3 Download [TOP].md
deleted file mode 100644
index f75d3cccbd175b07b07f729d8e020ec96fbc00be..0000000000000000000000000000000000000000
--- a/spaces/lincquiQcaudo/Top-20-Diffusion/Free Xats Generator V1.3 Download [TOP].md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Esteem 8 Software Crack Sites > https://bytlly.com/1sitnf · Free xats generator v1.3 download · Knock Out hindi movie torrent. Bein sport 1 canl izle hd pulive ... 1fdad05405
-
-
-
diff --git a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py b/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py
deleted file mode 100644
index 0c02eaf70fc0140aca7925f621c29a496f491cae..0000000000000000000000000000000000000000
--- a/spaces/lithiumice/SadTalker/src/face3d/models/arcface_torch/utils/utils_config.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import importlib
-import os.path as osp
-
-
-def get_config(config_file):
- assert config_file.startswith('configs/'), 'config file setting must start with configs/'
- temp_config_name = osp.basename(config_file)
- temp_module_name = osp.splitext(temp_config_name)[0]
- config = importlib.import_module("configs.base")
- cfg = config.config
- config = importlib.import_module("configs.%s" % temp_module_name)
- job_cfg = config.config
- cfg.update(job_cfg)
- if cfg.output is None:
- cfg.output = osp.join('work_dirs', temp_module_name)
- return cfg
\ No newline at end of file
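Editor's note: `get_config` above merges the job config module over `configs.base` and derives a default output directory from the config file name. A rough sketch of that merge with plain dicts; the keys shown are made up, the real ones live in the repo's `configs/` modules.

```python
import os.path as osp
from types import SimpleNamespace


def merge_configs(base: dict, job: dict, config_name: str) -> SimpleNamespace:
    """Job values override base values; a default output dir is derived from the
    config file name when none is given, as in get_config above."""
    cfg = {**base, **job}
    if cfg.get("output") is None:
        cfg["output"] = osp.join("work_dirs", config_name)
    return SimpleNamespace(**cfg)


cfg = merge_configs({"lr": 0.1, "output": None}, {"lr": 0.02}, "ms1mv3_r50")
print(cfg.lr, cfg.output)  # 0.02 work_dirs/ms1mv3_r50
```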
diff --git a/spaces/ljjggr/bingo/src/lib/bots/bing/types.ts b/spaces/ljjggr/bingo/src/lib/bots/bing/types.ts
deleted file mode 100644
index 02cd5e8b01e3529642d28dc1539bf958f4ac420b..0000000000000000000000000000000000000000
--- a/spaces/ljjggr/bingo/src/lib/bots/bing/types.ts
+++ /dev/null
@@ -1,259 +0,0 @@
-export type Author = 'user' | 'system' | 'bot'
-
-export type BotId = 'bing'
-
-export enum BingConversationStyle {
- Creative = 'Creative',
- Balanced = 'Balanced',
- Precise = 'Precise'
-}
-
-export enum ErrorCode {
- CONVERSATION_LIMIT = 'CONVERSATION_LIMIT',
- BING_UNAUTHORIZED = 'BING_UNAUTHORIZED',
- BING_FORBIDDEN = 'BING_FORBIDDEN',
- BING_CAPTCHA = 'BING_CAPTCHA',
- THROTTLE_LIMIT = 'THROTTLE_LIMIT',
- NOTFOUND_ERROR = 'NOT_FOUND_ERROR',
- UNKOWN_ERROR = 'UNKOWN_ERROR',
- NETWORK_ERROR = 'NETWORK_ERROR',
-}
-
-export class ChatError extends Error {
- code: ErrorCode
- constructor(message: string, code: ErrorCode) {
- super(message)
- this.code = code
- }
-}
-
-export type ChatMessageModel = {
- id: string
- author: Author
- text: string
- error?: ChatError
- throttling?: Throttling
- sourceAttributions?: SourceAttribution[]
- suggestedResponses?: SuggestedResponse[]
-}
-
-export interface ConversationModel {
- messages: ChatMessageModel[]
-}
-
-export type Event =
- | {
- type: 'UPDATE_ANSWER'
- data: {
- text: string
- spokenText?: string
- sourceAttributions?: SourceAttribution[]
- suggestedResponses?: SuggestedResponse[]
- throttling?: Throttling
- }
- }
- | {
- type: 'DONE'
- }
- | {
- type: 'ERROR'
- error: ChatError
- }
-
-export interface SendMessageParams<T> {
- prompt: string
- imageUrl?: string
- options: T
- onEvent: (event: Event) => void
- signal?: AbortSignal
-}
-
-export interface ConversationResponse {
- conversationId: string
- clientId: string
- conversationSignature: string
- result: {
- value: string
- message?: string
- }
-}
-
-export interface Telemetry {
- metrics?: null
- startTime: string
-}
-
-export interface ChatUpdateArgument {
- messages?: ChatResponseMessage[]
- throttling?: Throttling
- requestId: string
- result: null
-}
-
-export type ChatUpdateCompleteResponse = {
- type: 2
- invocationId: string
- item: ChatResponseItem
-} | {
- type: 1
- target: string
- arguments: ChatUpdateArgument[]
-} | {
- type: 3
- invocationId: string
-} | {
- type: 6 | 7
-}
-
-export interface ChatRequestResult {
- value: string
- serviceVersion: string
- error?: string
-}
-
-export interface ChatResponseItem {
- messages: ChatResponseMessage[]
- firstNewMessageIndex: number
- suggestedResponses: null
- conversationId: string
- requestId: string
- conversationExpiryTime: string
- telemetry: Telemetry
- result: ChatRequestResult
- throttling: Throttling
-}
-export enum InvocationEventType {
- Invocation = 1,
- StreamItem = 2,
- Completion = 3,
- StreamInvocation = 4,
- CancelInvocation = 5,
- Ping = 6,
- Close = 7,
-}
-
-// https://github.com/bytemate/bingchat-api/blob/main/src/lib.ts
-
-export interface ConversationInfo {
- conversationId: string
- clientId: string
- conversationSignature: string
- invocationId: number
- conversationStyle: BingConversationStyle
- prompt: string
- imageUrl?: string
-}
-
-export interface BingChatResponse {
- conversationSignature: string
- conversationId: string
- clientId: string
- invocationId: number
- conversationExpiryTime: Date
- response: string
- details: ChatResponseMessage
-}
-
-export interface Throttling {
- maxNumLongDocSummaryUserMessagesInConversation: number
- maxNumUserMessagesInConversation: number
- numLongDocSummaryUserMessagesInConversation: number
- numUserMessagesInConversation: number
-}
-
-export interface ChatResponseMessage {
- text: string
- spokenText?: string
- author: string
- createdAt: Date
- timestamp: Date
- messageId: string
- requestId: string
- offense: string
- adaptiveCards: AdaptiveCard[]
- sourceAttributions: SourceAttribution[]
- feedback: Feedback
- contentOrigin: string
- messageType?: string
- contentType?: string
- privacy: null
- suggestedResponses: SuggestedResponse[]
-}
-
-export interface AdaptiveCard {
- type: string
- version: string
- body: Body[]
-}
-
-export interface Body {
- type: string
- text: string
- wrap: boolean
- size?: string
-}
-
-export interface Feedback {
- tag: null
- updatedOn: null
- type: string
-}
-
-export interface SourceAttribution {
- providerDisplayName: string
- seeMoreUrl: string
- searchQuery: string
-}
-
-export interface SuggestedResponse {
- text: string
- author?: Author
- createdAt?: Date
- timestamp?: Date
- messageId?: string
- messageType?: string
- offense?: string
- feedback?: Feedback
- contentOrigin?: string
- privacy?: null
-}
-
-export interface KBlobRequest {
- knowledgeRequest: KnowledgeRequestContext
- imageBase64?: string
-}
-
-export interface KBlobResponse {
- blobId: string
- processedBlobId?: string
-}
-
-export interface KnowledgeRequestContext {
- imageInfo: ImageInfo;
- knowledgeRequest: KnowledgeRequest;
-}
-
-export interface ImageInfo {
- url?: string;
-}
-
-export interface KnowledgeRequest {
- invokedSkills: string[];
- subscriptionId: string;
- invokedSkillsRequestData: InvokedSkillsRequestData;
- convoData: ConvoData;
-}
-
-export interface ConvoData {
- convoid: string;
- convotone: BingConversationStyle;
-}
-
-export interface InvokedSkillsRequestData {
- enableFaceBlur: boolean;
-}
-
-export interface FileItem {
- url: string;
- status?: 'loading' | 'error' | 'loaded'
-}
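Editor's note: `ChatUpdateCompleteResponse` above is a union discriminated by the numeric `type` field (1 streamed update, 2 final item, 3 invocation complete, 6/7 ping or close). A rough Python sketch of how a client might dispatch on it; `handle_update` and the fallback strings are illustrative, not part of this module.

```python
def handle_update(message: dict) -> str:
    """Dispatch on the numeric `type` discriminator described by the union above."""
    kind = message.get("type")
    if kind == 1:  # streamed arguments with partial messages
        texts = [m.get("text", "")
                 for arg in message.get("arguments", [])
                 for m in arg.get("messages", [])]
        return "partial: " + " ".join(texts)
    if kind == 2:  # final item with the request result
        return "done: " + str(message.get("item", {}).get("result", {}).get("value"))
    if kind == 3:  # invocation complete
        return f"invocation {message.get('invocationId')} complete"
    return "ping/close"


print(handle_update({"type": 1, "arguments": [{"messages": [{"text": "Hello"}]}]}))
print(handle_update({"type": 2, "item": {"result": {"value": "Success"}}}))
```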
diff --git a/spaces/ltgoslo/ssa-perin/mtool/score/sdp.py b/spaces/ltgoslo/ssa-perin/mtool/score/sdp.py
deleted file mode 100644
index ec0212a21d6869eb6385031c9f808d14bda40463..0000000000000000000000000000000000000000
--- a/spaces/ltgoslo/ssa-perin/mtool/score/sdp.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Marco Kuhlmann
-
-import sys
-
-from score.core import anchor, intersect;
-
-class Measure(object):
-
- def __init__(self, get_items):
- self.get_items = get_items
- self.g = 0
- self.s = 0
- self.c = 0
- self.n_updates = 0
- self.n_matches = 0
-
- def update(self, gold, system, gidentities, sidentities, trace = 0):
- g_items = set(self.get_items(gold, gidentities))
- s_items = set(self.get_items(system, sidentities))
- self.g += len(g_items)
- self.s += len(s_items)
- self.c += len(g_items & s_items)
- self.n_updates += 1
- self.n_matches += g_items == s_items
- if trace:
- return {"g": len(g_items), "s": len(s_items),
- "c": len(g_items & s_items), "m": 1 if g_items == s_items else 0};
-
- def p(self):
- return self.c / self.s if self.s != 0 else 0.0
-
- def r(self):
- return self.c / self.g if self.g != 0 else 0.0
-
- def f(self):
- p = self.p()
- r = self.r()
- return 2 * p * r / (p + r) if p + r != 0 else 0.0
-
- def m(self):
- return self.n_matches / self.n_updates if self.n_updates != 0 else 0.0
-
- def report(self):
- json = {}
- json["g"] = self.g
- json["s"] = self.s
- json["c"] = self.c
- json["p"] = self.p()
- json["r"] = self.r()
- json["f"] = self.f()
- json["m"] = self.m()
- return json
-
-# def argument_predicate_dm(label):
-# return True
-
-# def argument_predicate_pas(label):
-# arguments = set("adj_ARG1 adj_ARG2 adj_MOD coord_ARG1 coord_ARG2 prep_ARG1 prep_ARG2 prep_ARG3 prep_MOD verb_ARG1 verb_ARG2 verb_ARG3 verb_ARG4 verb_MOD".split())
-# return label in arguments
-
-# def argument_predicate_psd(label):
-# return label.endswith("-arg")
-
-class Scorer(object):
-
- def __init__(self, include_virtual=True):
- self.measures = []
- self.measures.append(("labeled", Measure(self.get_itemsL)))
- self.measures.append(("unlabeled", Measure(self.get_itemsU)))
- # self.measureP = Measure(self.get_itemsP)
- # self.measureF = Measure(self.get_itemsF)
- # self.measureS = Measure(self.get_itemsS)
- self.include_virtual = include_virtual
-
- def identify(self, id):
- return self.identities[id]
-
- def get_itemsL(self, graph, identities):
- result = {(identities[e.src], identities[e.tgt], e.lab) for e in graph.edges}
- if self.include_virtual:
- for node in graph.nodes:
- if node.is_top:
- result.add((-1, identities[node.id], None))
- return result
-
- def get_itemsU(self, graph, identities):
- result = {(identities[e.src], identities[e.tgt]) for e in graph.edges}
- if self.include_virtual:
- for node in graph.nodes:
- if node.is_top:
- result.add((-1, identities[node.id]))
- return result
-
- # def get_itemsP(self, graph):
- # return {(frame[0], frame[2]) for frame in self.get_itemsF(graph)}
-
- # def get_itemsF(self, graph):
- # result = set()
- # for node in graph.nodes:
- # if self.has_scorable_predicate(node):
- # arguments = set()
- # for edge in node.outgoing_edges:
- # if self.argument_predicate(edge.lab):
- # arguments.add(edge)
- # extract = (node.id, node.sense, tuple(sorted(arguments)))
- # result.add(extract)
- # return result
-
- # def get_itemsS(self, graph):
- # return {(frame[0], frame[1]) for frame in self.get_itemsF(graph)}
-
- # def argument_predicate(self, label):
- # return True
-
- # def has_scorable_predicate(self, node):
- # return node.pred and node.pos.startswith("V")
-
- # def show_predications(self, g):
- # print(g.id)
- # report_predications(self.complete_predications(g))
-
- def update(self, g, s, trace):
- gidentities = {node.id: tuple(anchor(node)) for node in g.nodes}
- sidentities = {node.id: tuple(anchor(node)) for node in s.nodes}
- scores = dict();
- for key, measure in self.measures:
- score = measure.update(g, s, gidentities, sidentities, trace)
- if trace: scores[key] = score;
- return scores;
-
- def report(self, n, scores = None):
- json = {"n": n}
- for info, measure in self.measures:
- json[info] = measure.report()
- if scores is not None: json["scores"] = scores
- return json
-
-def evaluate(gold, system, format = "json", trace = 0):
- scorer = Scorer(include_virtual=True)
- n = 0
- scores = dict() if trace else None
- for g, s in intersect(gold, system):
- score = scorer.update(g, s, trace)
- n += 1
- if trace: scores[g.id] = score
- result = scorer.report(n, scores)
- return result
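Editor's note: the `Measure` class above accumulates gold, system and correct counts and reports precision, recall, F1 and exact match. A toy check of that bookkeeping with plain sets of (source, target, label) triples instead of full graphs:

```python
# Gold and system edge sets; the virtual (-1, top, None) item marks the top node.
gold = {(0, 1, "ARG1"), (0, 2, "ARG2"), (-1, 0, None)}
system = {(0, 1, "ARG1"), (0, 3, "ARG2"), (-1, 0, None)}

c = len(gold & system)        # 2 correct items
p = c / len(system)           # precision = 2/3
r = c / len(gold)             # recall    = 2/3
f = 2 * p * r / (p + r)       # F1        = 2/3
print(round(p, 3), round(r, 3), round(f, 3))
```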
diff --git a/spaces/luisoala/raw2logit/utils/augmentation.py b/spaces/luisoala/raw2logit/utils/augmentation.py
deleted file mode 100644
index 6bf4cd14a646e6d52b0bb6214f591954ccb8c336..0000000000000000000000000000000000000000
--- a/spaces/luisoala/raw2logit/utils/augmentation.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import random
-import numpy as np
-
-import torch
-import torchvision.transforms as T
-
-
-class RandomRotate90(): # Note: not the same as T.RandomRotation(90)
- def __call__(self, x):
- x = x.rot90(random.randint(0, 3), dims=(-1, -2))
- return x
-
- def __repr__(self):
- return self.__class__.__name__
-
-
-class AddGaussianNoise():
- def __init__(self, std=0.01):
- self.std = std
-
- def __call__(self, x):
- # noise = torch.randn_like(x) * self.std
- # out = x + noise
- # debug(x)
- # debug(noise)
- # debug(out)
- return x + torch.randn_like(x) * self.std
-
- def __repr__(self):
- return self.__class__.__name__ + f'(std={self.std})'
-
-
-def set_global_seed(seed):
- torch.random.manual_seed(seed)
- np.random.seed(seed % (2**32 - 1))
- random.seed(seed)
-
-
-class ComposeState(T.Compose):
- def __init__(self, transforms):
- self.transforms = []
- self.mask_transforms = []
-
- for t in transforms:
- apply_for_mask = True
- if isinstance(t, tuple):
- t, apply_for_mask = t
- self.transforms.append(t)
- if apply_for_mask:
- self.mask_transforms.append(t)
-
- self.seed = None
-
- # @debug
- def __call__(self, x, retain_state=False, mask_transform=False):
- if self.seed is not None: # retain previous state
- set_global_seed(self.seed)
- if retain_state: # save state for next call
- self.seed = self.seed or torch.seed()
- set_global_seed(self.seed)
- else:
- self.seed = None # reset / ignore state
-
- transforms = self.transforms if not mask_transform else self.mask_transforms
- for t in transforms:
- x = t(x)
- return x
-
-
-augmentation_weak = ComposeState([
- T.RandomHorizontalFlip(),
- T.RandomVerticalFlip(),
- RandomRotate90(),
-])
-
-
-augmentation_strong = ComposeState([
- T.RandomHorizontalFlip(p=0.5),
- T.RandomVerticalFlip(p=0.5),
- T.RandomApply([T.RandomRotation(90)], p=0.5),
- # (transform, apply_to_mask=True)
- (T.RandomApply([AddGaussianNoise(std=0.0005)], p=0.5), False),
- (T.RandomAdjustSharpness(0.5, p=0.5), False),
-])
-
-
-def get_augmentation(type):
- if type == 'none':
- return None
- if type == 'weak':
- return augmentation_weak
- if type == 'strong':
- return augmentation_strong
-
-
-if __name__ == '__main__':
- import os
- if not os.path.exists('README.md'):
- os.chdir('..')
-
- # from utils.debug import debug
- from dataset import get_dataset
- import matplotlib.pyplot as plt
-
- dataset = get_dataset('DS') # drone segmentation
- img, mask = dataset[10]
- mask = (mask + 0.2) / 1.2
-
- plt.figure(figsize=(14, 8))
- plt.subplot(121)
- plt.imshow(img)
- plt.subplot(122)
- plt.imshow(mask)
- plt.suptitle('no augmentation')
- plt.show()
-
- from utils.base import np2torch, torch2np
- img, mask = np2torch(img), np2torch(mask)
-
- # from utils.augmentation import get_augmentation
- augmentation = get_augmentation('strong')
-
- set_global_seed(1)
-
- for i in range(1, 4):
- plt.figure(figsize=(14, 8))
- plt.subplot(121)
- plt.imshow(torch2np(augmentation(img.unsqueeze(0), retain_state=True)).squeeze())
- plt.subplot(122)
- plt.imshow(torch2np(augmentation(mask.unsqueeze(0), mask_transform=True)).squeeze())
- plt.suptitle(f'augmentation test {i}')
- plt.show()
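Editor's note: the point of `ComposeState` above is pair-consistent augmentation: the image call retains the RNG state and the mask call replays it, so both receive the same flips and rotation while the image-only transforms (noise, sharpness) are skipped for the mask. A short usage sketch, assuming the module is importable as `utils.augmentation`:

```python
import torch

from utils.augmentation import get_augmentation

aug = get_augmentation('strong')
img = torch.rand(1, 3, 64, 64)                   # image batch
mask = (torch.rand(1, 1, 64, 64) > 0.5).float()  # matching segmentation mask

img_aug = aug(img, retain_state=True)            # save the RNG state
mask_aug = aug(mask, mask_transform=True)        # replay it, geometry only
assert img_aug.shape == img.shape and mask_aug.shape == mask.shape
```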
diff --git a/spaces/lunarring/latentblending/ldm/models/diffusion/dpm_solver/__init__.py b/spaces/lunarring/latentblending/ldm/models/diffusion/dpm_solver/__init__.py
deleted file mode 100644
index 7427f38c07530afbab79154ea8aaf88c4bf70a08..0000000000000000000000000000000000000000
--- a/spaces/lunarring/latentblending/ldm/models/diffusion/dpm_solver/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .sampler import DPMSolverSampler
\ No newline at end of file
diff --git a/spaces/ma-xu/LIVE/pybind11/tests/test_sequences_and_iterators.cpp b/spaces/ma-xu/LIVE/pybind11/tests/test_sequences_and_iterators.cpp
deleted file mode 100644
index 1ce0451092b2050b4b5ef762c6a5e755237844d8..0000000000000000000000000000000000000000
--- a/spaces/ma-xu/LIVE/pybind11/tests/test_sequences_and_iterators.cpp
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- tests/test_sequences_and_iterators.cpp -- supporting Pythons' sequence protocol, iterators,
- etc.
-
- Copyright (c) 2016 Wenzel Jakob
-
- All rights reserved. Use of this source code is governed by a
- BSD-style license that can be found in the LICENSE file.
-*/
-
-#include "pybind11_tests.h"
-#include "constructor_stats.h"
-#include <pybind11/operators.h>
-#include <pybind11/stl.h>
-
-#include <algorithm>
-
-template <typename T>
-class NonZeroIterator {
- const T* ptr_;
-public:
- NonZeroIterator(const T* ptr) : ptr_(ptr) {}
- const T& operator*() const { return *ptr_; }
- NonZeroIterator& operator++() { ++ptr_; return *this; }
-};
-
-class NonZeroSentinel {};
-
-template <typename A, typename B>
-bool operator==(const NonZeroIterator<std::pair<A, B>>& it, const NonZeroSentinel&) {
- return !(*it).first || !(*it).second;
-}
-
-template <typename PythonType>
-py::list test_random_access_iterator(PythonType x) {
- if (x.size() < 5)
- throw py::value_error("Please provide at least 5 elements for testing.");
-
- auto checks = py::list();
- auto assert_equal = [&checks](py::handle a, py::handle b) {
- auto result = PyObject_RichCompareBool(a.ptr(), b.ptr(), Py_EQ);
- if (result == -1) { throw py::error_already_set(); }
- checks.append(result != 0);
- };
-
- auto it = x.begin();
- assert_equal(x[0], *it);
- assert_equal(x[0], it[0]);
- assert_equal(x[1], it[1]);
-
- assert_equal(x[1], *(++it));
- assert_equal(x[1], *(it++));
- assert_equal(x[2], *it);
- assert_equal(x[3], *(it += 1));
- assert_equal(x[2], *(--it));
- assert_equal(x[2], *(it--));
- assert_equal(x[1], *it);
- assert_equal(x[0], *(it -= 1));
-
- assert_equal(it->attr("real"), x[0].attr("real"));
- assert_equal((it + 1)->attr("real"), x[1].attr("real"));
-
- assert_equal(x[1], *(it + 1));
- assert_equal(x[1], *(1 + it));
- it += 3;
- assert_equal(x[1], *(it - 2));
-
-    checks.append(static_cast<std::size_t>(x.end() - x.begin()) == x.size());
-    checks.append((x.begin() + static_cast<std::ptrdiff_t>(x.size())) == x.end());
- checks.append(x.begin() < x.end());
-
- return checks;
-}
-
-TEST_SUBMODULE(sequences_and_iterators, m) {
- // test_sliceable
- class Sliceable{
- public:
- Sliceable(int n): size(n) {}
- int start,stop,step;
- int size;
- };
- py::class_(m,"Sliceable")
- .def(py::init())
- .def("__getitem__",[](const Sliceable &s, py::slice slice) {
- ssize_t start, stop, step, slicelength;
- if (!slice.compute(s.size, &start, &stop, &step, &slicelength))
- throw py::error_already_set();
-            int istart = static_cast<int>(start);
-            int istop = static_cast<int>(stop);
-            int istep = static_cast<int>(step);
- return std::make_tuple(istart,istop,istep);
- })
- ;
-
- // test_sequence
- class Sequence {
- public:
- Sequence(size_t size) : m_size(size) {
- print_created(this, "of size", m_size);
- m_data = new float[size];
- memset(m_data, 0, sizeof(float) * size);
- }
-        Sequence(const std::vector<float> &value) : m_size(value.size()) {
- print_created(this, "of size", m_size, "from std::vector");
- m_data = new float[m_size];
- memcpy(m_data, &value[0], sizeof(float) * m_size);
- }
- Sequence(const Sequence &s) : m_size(s.m_size) {
- print_copy_created(this);
- m_data = new float[m_size];
- memcpy(m_data, s.m_data, sizeof(float)*m_size);
- }
- Sequence(Sequence &&s) : m_size(s.m_size), m_data(s.m_data) {
- print_move_created(this);
- s.m_size = 0;
- s.m_data = nullptr;
- }
-
- ~Sequence() { print_destroyed(this); delete[] m_data; }
-
- Sequence &operator=(const Sequence &s) {
- if (&s != this) {
- delete[] m_data;
- m_size = s.m_size;
- m_data = new float[m_size];
- memcpy(m_data, s.m_data, sizeof(float)*m_size);
- }
- print_copy_assigned(this);
- return *this;
- }
-
- Sequence &operator=(Sequence &&s) {
- if (&s != this) {
- delete[] m_data;
- m_size = s.m_size;
- m_data = s.m_data;
- s.m_size = 0;
- s.m_data = nullptr;
- }
- print_move_assigned(this);
- return *this;
- }
-
- bool operator==(const Sequence &s) const {
- if (m_size != s.size()) return false;
- for (size_t i = 0; i < m_size; ++i)
- if (m_data[i] != s[i])
- return false;
- return true;
- }
- bool operator!=(const Sequence &s) const { return !operator==(s); }
-
- float operator[](size_t index) const { return m_data[index]; }
- float &operator[](size_t index) { return m_data[index]; }
-
- bool contains(float v) const {
- for (size_t i = 0; i < m_size; ++i)
- if (v == m_data[i])
- return true;
- return false;
- }
-
- Sequence reversed() const {
- Sequence result(m_size);
- for (size_t i = 0; i < m_size; ++i)
- result[m_size - i - 1] = m_data[i];
- return result;
- }
-
- size_t size() const { return m_size; }
-
- const float *begin() const { return m_data; }
- const float *end() const { return m_data+m_size; }
-
- private:
- size_t m_size;
- float *m_data;
- };
- py::class_(m, "Sequence")
- .def(py::init())
- .def(py::init&>())
- /// Bare bones interface
- .def("__getitem__", [](const Sequence &s, size_t i) {
- if (i >= s.size()) throw py::index_error();
- return s[i];
- })
- .def("__setitem__", [](Sequence &s, size_t i, float v) {
- if (i >= s.size()) throw py::index_error();
- s[i] = v;
- })
- .def("__len__", &Sequence::size)
- /// Optional sequence protocol operations
- .def("__iter__", [](const Sequence &s) { return py::make_iterator(s.begin(), s.end()); },
- py::keep_alive<0, 1>() /* Essential: keep object alive while iterator exists */)
- .def("__contains__", [](const Sequence &s, float v) { return s.contains(v); })
- .def("__reversed__", [](const Sequence &s) -> Sequence { return s.reversed(); })
- /// Slicing protocol (optional)
- .def("__getitem__", [](const Sequence &s, py::slice slice) -> Sequence* {
- size_t start, stop, step, slicelength;
- if (!slice.compute(s.size(), &start, &stop, &step, &slicelength))
- throw py::error_already_set();
- Sequence *seq = new Sequence(slicelength);
- for (size_t i = 0; i < slicelength; ++i) {
- (*seq)[i] = s[start]; start += step;
- }
- return seq;
- })
- .def("__setitem__", [](Sequence &s, py::slice slice, const Sequence &value) {
- size_t start, stop, step, slicelength;
- if (!slice.compute(s.size(), &start, &stop, &step, &slicelength))
- throw py::error_already_set();
- if (slicelength != value.size())
- throw std::runtime_error("Left and right hand size of slice assignment have different sizes!");
- for (size_t i = 0; i < slicelength; ++i) {
- s[start] = value[i]; start += step;
- }
- })
- /// Comparisons
- .def(py::self == py::self)
- .def(py::self != py::self)
- // Could also define py::self + py::self for concatenation, etc.
- ;
-
- // test_map_iterator
- // Interface of a map-like object that isn't (directly) an unordered_map, but provides some basic
- // map-like functionality.
- class StringMap {
- public:
- StringMap() = default;
-        StringMap(std::unordered_map<std::string, std::string> init)
- : map(std::move(init)) {}
-
- void set(std::string key, std::string val) { map[key] = val; }
- std::string get(std::string key) const { return map.at(key); }
- size_t size() const { return map.size(); }
- private:
-        std::unordered_map<std::string, std::string> map;
- public:
- decltype(map.cbegin()) begin() const { return map.cbegin(); }
- decltype(map.cend()) end() const { return map.cend(); }
- };
- py::class_(m, "StringMap")
- .def(py::init<>())
- .def(py::init>())
- .def("__getitem__", [](const StringMap &map, std::string key) {
- try { return map.get(key); }
- catch (const std::out_of_range&) {
- throw py::key_error("key '" + key + "' does not exist");
- }
- })
- .def("__setitem__", &StringMap::set)
- .def("__len__", &StringMap::size)
- .def("__iter__", [](const StringMap &map) { return py::make_key_iterator(map.begin(), map.end()); },
- py::keep_alive<0, 1>())
- .def("items", [](const StringMap &map) { return py::make_iterator(map.begin(), map.end()); },
- py::keep_alive<0, 1>())
- ;
-
- // test_generalized_iterators
- class IntPairs {
- public:
-        IntPairs(std::vector<std::pair<int, int>> data) : data_(std::move(data)) {}
-        const std::pair<int, int>* begin() const { return data_.data(); }
-    private:
-        std::vector<std::pair<int, int>> data_;
-    };
-    py::class_<IntPairs>(m, "IntPairs")
-        .def(py::init<std::vector<std::pair<int, int>>>())
-        .def("nonzero", [](const IntPairs& s) {
-            return py::make_iterator(NonZeroIterator<std::pair<int, int>>(s.begin()), NonZeroSentinel());
-        }, py::keep_alive<0, 1>())
-        .def("nonzero_keys", [](const IntPairs& s) {
-            return py::make_key_iterator(NonZeroIterator<std::pair<int, int>>(s.begin()), NonZeroSentinel());
- }, py::keep_alive<0, 1>())
- ;
-
-
-#if 0
- // Obsolete: special data structure for exposing custom iterator types to python
- // kept here for illustrative purposes because there might be some use cases which
- // are not covered by the much simpler py::make_iterator
-
- struct PySequenceIterator {
- PySequenceIterator(const Sequence &seq, py::object ref) : seq(seq), ref(ref) { }
-
- float next() {
- if (index == seq.size())
- throw py::stop_iteration();
- return seq[index++];
- }
-
- const Sequence &seq;
- py::object ref; // keep a reference
- size_t index = 0;
- };
-
- py::class_(seq, "Iterator")
- .def("__iter__", [](PySequenceIterator &it) -> PySequenceIterator& { return it; })
- .def("__next__", &PySequenceIterator::next);
-
- On the actual Sequence object, the iterator would be constructed as follows:
- .def("__iter__", [](py::object s) { return PySequenceIterator(s.cast(), s); })
-#endif
-
- // test_python_iterator_in_cpp
- m.def("object_to_list", [](py::object o) {
- auto l = py::list();
- for (auto item : o) {
- l.append(item);
- }
- return l;
- });
-
- m.def("iterator_to_list", [](py::iterator it) {
- auto l = py::list();
- while (it != py::iterator::sentinel()) {
- l.append(*it);
- ++it;
- }
- return l;
- });
-
- // test_sequence_length: check that Python sequences can be converted to py::sequence.
- m.def("sequence_length", [](py::sequence seq) { return seq.size(); });
-
- // Make sure that py::iterator works with std algorithms
- m.def("count_none", [](py::object o) {
- return std::count_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); });
- });
-
- m.def("find_none", [](py::object o) {
- auto it = std::find_if(o.begin(), o.end(), [](py::handle h) { return h.is_none(); });
- return it->is_none();
- });
-
- m.def("count_nonzeros", [](py::dict d) {
-        return std::count_if(d.begin(), d.end(), [](std::pair<py::handle, py::handle> p) {
-            return p.second.cast<int>() != 0;
- });
- });
-
- m.def("tuple_iterator", &test_random_access_iterator);
- m.def("list_iterator", &test_random_access_iterator);
- m.def("sequence_iterator", &test_random_access_iterator);
-
- // test_iterator_passthrough
- // #181: iterator passthrough did not compile
- m.def("iterator_passthrough", [](py::iterator s) -> py::iterator {
- return py::make_iterator(std::begin(s), std::end(s));
- });
-
- // test_iterator_rvp
- // #388: Can't make iterators via make_iterator() with different r/v policies
-    static std::vector<int> list = { 1, 2, 3 };
-    m.def("make_iterator_1", []() { return py::make_iterator<py::return_value_policy::copy>(list); });
-    m.def("make_iterator_2", []() { return py::make_iterator<py::return_value_policy::automatic>(list); });
-}
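Editor's note: the test module above is exercised from Python once compiled into `pybind11_tests`. A sketch of how the bound `Sequence` behaves (only importable inside pybind11's own test build):

```python
from pybind11_tests import sequences_and_iterators as m

s = m.Sequence(5)                    # zero-initialised, length 5
s[0], s[3] = 1.0, 12.5               # __setitem__
assert 12.5 in s                     # __contains__
assert list(s)[0] == 1.0             # __iter__ via py::make_iterator
assert list(reversed(s))[-1] == 1.0  # __reversed__
sub = s[1:4:2]                       # slicing returns a new Sequence
assert len(sub) == 2
```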
diff --git a/spaces/magicr/BuboGPT/README.md b/spaces/magicr/BuboGPT/README.md
deleted file mode 100644
index 825c07559e74a961e6d20f7fada7f59f82d4a519..0000000000000000000000000000000000000000
--- a/spaces/magicr/BuboGPT/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: BuboGPT
-emoji: 🏢
-colorFrom: pink
-colorTo: red
-sdk: gradio
-sdk_version: 3.36.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the [paper page](https://huggingface.co/papers/2307.08581), the [project page](https://bubo-gpt.github.io/).
\ No newline at end of file
diff --git a/spaces/magicr/BuboGPT/match.py b/spaces/magicr/BuboGPT/match.py
deleted file mode 100644
index 56a2d4fbf9db0fa8f04e19d98f0a1dcaefdd24ec..0000000000000000000000000000000000000000
--- a/spaces/magicr/BuboGPT/match.py
+++ /dev/null
@@ -1,184 +0,0 @@
-import os
-import torch
-import torch.nn as nn
-import openai
-import random
-import contextlib
-
-from constants.constant import COLORS
-
-
-@contextlib.contextmanager
-def auto_proxy():
- use_proxy = "OPENAI_PROXY" in os.environ
- if use_proxy:
- os.environ['http_proxy'] = os.environ["OPENAI_PROXY"]
- os.environ['https_proxy'] = os.environ["OPENAI_PROXY"]
-
- yield
-
- if use_proxy:
- os.unsetenv('http_proxy')
- os.unsetenv('https_proxy')
-
-
-class MatchModule(nn.Module):
- def __init__(self, device='cpu', model="gpt-3.5-turbo"):
- super().__init__()
- self.device = device
- self.model = model
- if "OPENAI_API_KEY" not in os.environ:
- raise RuntimeError("Please specify your openai API key with the environment variable OPENAI_API_KEY")
- openai.api_key = os.environ["OPENAI_API_KEY"]
- self.examples = [
- (
- "['dog', 'sheepdog', 'grass', 'chase sheepdog', 'field', 'field park', 'grassy', 'corgi', 'brown dog', 'brown', 'park']"
- "A brown dog running in the grassy field",
- 'brown dog - brown dog\n'
- 'grassy field - field'
- ),
- (
- "['man', 'ride', 'bicycle', 'red', 'passenger train', 'track']"
- "A man riding a bicycle next to a red passenger train on the tracks.",
- "man - man\n"
- "bicycle - bicycle\n"
- "red passenger train - passenger train\n"
- "tracks - track"
- ),
- (
- "['horse', 'herd', 'dust', 'grassy', 'field']"
- "The image shows a large herd of wild horses running across a wide, open field . "
- "There are many horses running in different directions, with some running towards the camera "
- "and others running towards the edge of the field. "
- "The horses are brown and white, with some having manes and tails",
- "herd - herd\n"
- "wild horses - horse\n"
- "field - field"
- ),
- (
- "['man', 'plate platter', 'sandwich', 'tablening table', 'saucer', 'coffee coffee cup', 'coffee', 'bean chip fry', 'chip fry', 'coffee cup', 'bean', 'food', 'table', 'restaurant']"
- "The image shows a man sitting at a table , surrounded by a large amount of food and drinks . There is a chicken sandwich on the table, as well as a bowl of soup, potato wedges, and several fried potatoes. The man is holding a spoon, which he is expected to use to eat one of the wedges or possibly a piece of the chicken sandwich. The other items on the table, such as drinks and a bowl of soup, appear to be for those accompanying the man at the table. The scene takes place in a dining establishment , likely a restaurant , based on the presence of a spoon and food items on the table, along with a tablecloth and table setting. Additionally, the presence of several chairs and the overall setup suggest this is a formal, sit-down setting rather than a fast food or take-out restaurant. The amount of food on the table suggests that this is a hearty, satisfying meal, providing a range of flavors and textures that satisfy the palate.",
- "man - man\n"
- "table - table\n"
- "food - food\n"
- "chicken sandwich - sandwich\n"
- "restaurant - restaurant\n"
- "fried potatoes - chip fry\n"
- "drinks - coffee"
- ),
- (
- "['bacon', 'silverware utensil', 'fork', 'coffee', 'table dinning table', 'plate platter', 'beverage', 'napkin', 'bread french toast pan', 'pine cone', 'coffee cup cup mug', 'fruit', 'breakfast food fruit', 'bacon', 'gravy', 'bread pancake']"
- "The image presents a delicious breakfast setting on a wooden dining table. The main course is a white plate with French toast and bacon . Adding to the meal are a bottle of maple syrup and a cup of coffee , both placed next to the plate. The table is set with a fork , a knife, and a spoon, all arranged neatly around the plate. There are also a few apples scattered across the table, possibly serving as a healthy addition to the meal. Overall, the scene is inviting and warmly lit, making the breakfast look especially appetizing.",
- "wooden dinning table - table dinning table\n"
- "fork - fork\n"
- "coffee - coffee\n"
- "apples - fruit\n"
- "white plate - plate platter\n"
- "french toast - bread french toast pan\n"
- "bacon - bacon"
- ),
- (
- "['woman', 'canopy', 'man', 'dog pet', 'dog', 'canopy', 'bicycle', 'person', 'leash', "
- "'dog pet', 'leash', 'stall', 'person woman', 'dog pet', 'city street road', 'street scene']"
- "The image captures a lively street scene with several people walking and riding bikes. "
- "There are two bicycles in the picture, one located in the middle of the scene and the other towards "
- "the right side. Among the people, some are walking close to the bicycles, while others are scattered"
- "throughout the scene. In addition to the bicycles and people, there are four dogs in the picture, "
- "adding to the liveliness of the scene. The dogs are walking around the street, mingling with the "
- "pedestrians and bikers. The street is bustling with activity, as people, bikes, and dogs all "
- "share the space and enjoy the day.",
- "street scene - street scene\n"
- "the street - city street road\n"
- "bicycles - bicycle\n"
- "four dogs - dog\n"
- "people - person"
- )
- ]
- self.system_prompt = "You are a helpful assistant. Now I will give you a list of entities and give you a " \
- "paragraph or sentence. " \
- "you need to first extract the entity given in the text and then" \
- "find the corresponding entity having similar or identical meanings in the given list. " \
- "Find all the pairs." \
- "Are you clear? let us think step by step. " \
- "The extracted entities must come from the given text and the corresponding entity must " \
- "come from the given list. " \
- "If multiple entities can be linked to the same span of text or vice versa, " \
- "just keep one and do not merge them." \
- "Here is an example: ['dog', 'sheepdog', 'grass', 'chase sheepdog', 'field', " \
- "'field park', 'grassy', 'corgi', 'brown dog', 'brown', 'park'] " \
- "A brown dog running in the grassy field" \
- "The answer is: brown dog — brown dog \n grassy field — field"
-
- @torch.no_grad()
- def forward(self, text, entity_state):
- entity_list = list(entity_state['grounding']['local'].keys())
- message = [
- {"role": "system", "content": self.system_prompt},
- ]
- for q, a in self.examples:
- message.append({"role": "user", "content": q})
- message.append({"role": "system", "content": a})
- message.append({
- "role": "user",
- "content": '{}{}'.format(entity_state['grounding']['local'].keys(), text)
- })
-
- print('==> Sending request to ChatGPT...')
- with auto_proxy():
- resp = openai.ChatCompletion.create(
- model=self.model,
- messages=message
- )
- ans = resp['choices'][0]['message']['content']
- print("===> In the matching module.")
- print('==> Response from ChatGPT received: {}.'.format(ans))
- # print(resp)
- items = ans.split('\n')
- res = []
- match_state = {}
- for i in items:
- if ' - ' not in i:
- continue
- name, ref = i.split(' - ', maxsplit=1)
- name, ref = name.lower(), ref.lower()
- # NOTE: ref may not be contained in the original text, double check later.
- if ref in entity_list:
- color_name = entity_state['grounding']['local'][ref]['color']
- else:
- print('pair {} - {} not found'.format(name, ref))
- # color_name = "grey"
- continue
- match_state[name] = ref
- entity_idx = text.lower().find(name)
- if entity_idx == -1:
- entity_idx = text.lower().find(name.lower())
- ref = name
- if entity_idx == -1:
- continue
-
- res.append((name, ref, entity_idx, color_name))
- res = sorted(res, key=lambda x: x[2])
- # TODO: Bug to fix
- highlight_output = []
- prev = 0
- color_map = {}
-
- for i, r in enumerate(res):
- if r[2] < prev:
- continue
- # to avoid one-vs-many alignments
- if r[2] != prev:
- highlight_output.append((text[prev:r[2]], None))
- highlight_output.append((text[r[2]:r[2] + len(r[0])], f'{i + 1}'))
- color_map[f'{i + 1}'] = r[-1]
- prev = r[2] + len(r[0])
-        if prev < len(text):
- highlight_output.append((text[prev:], None))
- print("=======> Highlight Output: ", highlight_output)
- return highlight_output, match_state, color_map
-
-
-if __name__ == '__main__':
- ner = MatchModule(model='gpt-4')
- print(
- ner('The image shows a resort with a large swimming pool surrounded by lounge chairs and umbrellas. There are several buildings in the background with white walls and blue roofs. There are sand dunes and palm trees in the background indicating that the resort is located in a desert area. The sky is clear and blue with a few fluffy clouds in the distance.'))
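Editor's note: the heart of `MatchModule.forward` is not the API call but the parsing of the `name - ref` lines returned by the model and the construction of highlight spans. An offline sketch of that step; the caption, answer and colours below are made up for illustration.

```python
text = "A brown dog running in the grassy field"
llm_answer = "brown dog - brown dog\ngrassy field - field"
local_entities = {"brown dog": {"color": "red"}, "field": {"color": "blue"}}

# Parse "name - ref" pairs and locate each name in the caption.
res = []
for line in llm_answer.split("\n"):
    if " - " not in line:
        continue
    name, ref = (s.lower() for s in line.split(" - ", maxsplit=1))
    if ref not in local_entities:
        continue
    idx = text.lower().find(name)
    if idx != -1:
        res.append((name, ref, idx, local_entities[ref]["color"]))

# Build (span, label) pairs in the format expected by gr.HighlightedText.
highlight, prev = [], 0
for i, (name, ref, idx, color) in enumerate(sorted(res, key=lambda x: x[2])):
    if idx < prev:
        continue  # skip overlapping matches
    if idx != prev:
        highlight.append((text[prev:idx], None))
    highlight.append((text[idx:idx + len(name)], str(i + 1)))
    prev = idx + len(name)
if prev < len(text):
    highlight.append((text[prev:], None))

print(highlight)
# [('A ', None), ('brown dog', '1'), (' running in the ', None), ('grassy field', '2')]
```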
diff --git a/spaces/mateuseap/magic-vocals/lib/infer_pack/modules/F0Predictor/__init__.py b/spaces/mateuseap/magic-vocals/lib/infer_pack/modules/F0Predictor/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/matthoffner/starchat-ui/components/Promptbar/components/PromptModal.tsx b/spaces/matthoffner/starchat-ui/components/Promptbar/components/PromptModal.tsx
deleted file mode 100644
index 81bd26cedf428ba31308e0ce40024f0c237c6b0b..0000000000000000000000000000000000000000
--- a/spaces/matthoffner/starchat-ui/components/Promptbar/components/PromptModal.tsx
+++ /dev/null
@@ -1,130 +0,0 @@
-import { FC, KeyboardEvent, useEffect, useRef, useState } from 'react';
-
-import { useTranslation } from 'next-i18next';
-
-import { Prompt } from '@/types/prompt';
-
-interface Props {
- prompt: Prompt;
- onClose: () => void;
- onUpdatePrompt: (prompt: Prompt) => void;
-}
-
-export const PromptModal: FC<Props> = ({ prompt, onClose, onUpdatePrompt }) => {
- const { t } = useTranslation('promptbar');
- const [name, setName] = useState(prompt.name);
- const [description, setDescription] = useState(prompt.description);
- const [content, setContent] = useState(prompt.content);
-
-  const modalRef = useRef<HTMLDivElement>(null);
-  const nameInputRef = useRef<HTMLInputElement>(null);
-
- const handleEnter = (e: KeyboardEvent) => {
- if (e.key === 'Enter' && !e.shiftKey) {
- onUpdatePrompt({ ...prompt, name, description, content: content.trim() });
- onClose();
- }
- };
-
- useEffect(() => {
- const handleMouseDown = (e: MouseEvent) => {
- if (modalRef.current && !modalRef.current.contains(e.target as Node)) {
- window.addEventListener('mouseup', handleMouseUp);
- }
- };
-
- const handleMouseUp = (e: MouseEvent) => {
- window.removeEventListener('mouseup', handleMouseUp);
- onClose();
- };
-
- window.addEventListener('mousedown', handleMouseDown);
-
- return () => {
- window.removeEventListener('mousedown', handleMouseDown);
- };
- }, [onClose]);
-
- useEffect(() => {
- nameInputRef.current?.focus();
- }, []);
-
-  return (
-    <div ref={modalRef} onKeyDown={handleEnter}>
-      <div role="dialog">
-        <div>{t('Name')}</div>
-        <input
-          ref={nameInputRef}
-          value={name}
-          onChange={(e) => setName(e.target.value)}
-        />
-
-        <div>{t('Description')}</div>
-        <textarea
-          value={description}
-          onChange={(e) => setDescription(e.target.value)}
-          rows={3}
-        />
-
-        <div>{t('Prompt')}</div>
-        <textarea
-          value={content}
-          onChange={(e) => setContent(e.target.value)}
-          rows={10}
-        />
-
-        <button
-          type="button"
-          onClick={() => {
-            const updatedPrompt = { ...prompt, name, description, content: content.trim() };
-            onUpdatePrompt(updatedPrompt);
-            onClose();
-          }}
-        >
-          {t('Save')}
-        </button>
-      </div>
-    </div>
-  );
-};
diff --git a/spaces/mbarnig/lb-de-en-fr-pt-COQUI-STT/app.py b/spaces/mbarnig/lb-de-en-fr-pt-COQUI-STT/app.py
deleted file mode 100644
index da59909095c14d5bdf5b6b39a628f07e5ff842a6..0000000000000000000000000000000000000000
--- a/spaces/mbarnig/lb-de-en-fr-pt-COQUI-STT/app.py
+++ /dev/null
@@ -1,151 +0,0 @@
-import gradio as gr
-import numpy as np
-import wave
-from huggingface_hub import hf_hub_download
-from stt import Model
-
-state = gr.Variable()
-
-REPO_ID = "mbarnig/lb-de-fr-en-pt-coqui-stt-models"
-
-my_title = "🇩🇪 🇫🇷 🇬🇧 🇵🇹 Mir verstinn och Lëtzebuergesch ! 🇱🇺"
-my_description = "Multilingual Speech-to-Text (STT) system understanding Lëtzebuergesch, Deutsch, Français, English and Português. My luxembourgish stt-model is based on [Coqui-STT version 1.3.0](https://github.com/coqui-ai/STT), the other models are downloaded from the [Coqui Model Zoo](https://coqui.ai/models). Thanks to 🐸 [Coqui.ai](https://https://coqui.ai/)."
-
-STT_LANGUAGES = [
- "Deutsch",
- "English",
- "Français",
- "Lëtzebuergesch",
- "Português"
-]
-
-EXAMPLES = [
- ["examples/german.wav", "Deutsch", True, "Thorsten", "wir setzen uns deshalb für eine zweistaaten lösung ein und hoffen auch dass hier fortschritte im friedensprozess gemacht werden"],
- ["examples/english.wav", "English", True, "Linda", "every window and roof which could command a view of the horrible performance was occupied"],
- ["examples/french.wav", "Français", True, "Bernard", "chacun avait sa part dans ces travaux suivant les prescriptions d'un règlement affiché dans la grande salle"],
- ["examples/luxembourgish.wav", "Lëtzebuergesch", True, "Pit", "ma och den aarbechtsmaart muss weider wuessen fir datt de system funktionéiert déi faméis rentemauer steet schonn do ze wénken"],
- ["examples/portuguese.wav", "Português", True, "Ed", "academicismo ou academismo designam originalmente o método de ensino artístico profissionalizante concebido formalizado e ministrado pelas academias de arte europeias"]
-]
-
-def reformat_freq(sr, y):
- if sr not in (
- 48000,
- 16000,
- ): # Deepspeech only supports 16k, (we convert 48k -> 16k)
- raise ValueError("Unsupported rate", sr)
- if sr == 48000:
- y = (
- ((y / max(np.max(y), 1)) * 32767)
- .reshape((-1, 3))
- .mean(axis=1)
- .astype("int16")
- )
- sr = 16000
- return sr, y
-
-def customization(language, scorer):
- if language == "Lëtzebuergesch":
- lb_stt_model_path = hf_hub_download(repo_id = REPO_ID, filename = "luxembourgish/model.tflite")
- lb_stt_scorer_path = hf_hub_download(repo_id = REPO_ID, filename = "luxembourgish/kenlm-luxembourgish.scorer")
- myModel = Model(lb_stt_model_path)
- myScorer_path = lb_stt_scorer_path
- elif language == "Deutsch":
- de_stt_model_path = hf_hub_download(repo_id = REPO_ID, filename = "german/model.tflite")
- de_stt_scorer_path = hf_hub_download(repo_id = REPO_ID, filename = "german/de-aashishag-1-prune-kenlm.scorer")
- myModel = Model(de_stt_model_path)
- myScorer_path = de_stt_scorer_path
- elif language == "Français":
- fr_stt_model_path = hf_hub_download(repo_id = REPO_ID, filename = "french/model.tflite")
- fr_stt_scorer_path = hf_hub_download(repo_id = REPO_ID, filename = "french/kenlm.scorer")
- myModel = Model(fr_stt_model_path)
- myScorer_path = fr_stt_scorer_path
- elif language == "English":
- en_stt_model_path = hf_hub_download(repo_id = REPO_ID, filename = "english/model.tflite")
- en_stt_scorer_path = hf_hub_download(repo_id = REPO_ID, filename = "english/huge-vocabulary.scorer")
- myModel = Model(en_stt_model_path)
- myScorer_path = en_stt_scorer_path
- elif language == "Português":
- pt_stt_model_path = hf_hub_download(repo_id = REPO_ID, filename = "portuguese/model.tflite")
- pt_stt_scorer_path = hf_hub_download(repo_id = REPO_ID, filename = "portuguese/pt-itml-0-prune-kenlm.scorer")
- myModel = Model(pt_stt_model_path)
- myScorer_path = pt_stt_scorer_path
-    else:
-        raise ValueError("Please select a language !")
-    return myModel, myScorer_path
-
-def stt_upload(audio_file_path, language, scorer, speaker, groundtruth):
- if audio_file_path:
- acoustic_model, scorer_path = customization(language, scorer)
- audio = wave.open(audio_file_path, 'r')
- audio_buffer = np.frombuffer(audio.readframes(audio.getnframes()), np.int16)
- if scorer:
- acoustic_model.enableExternalScorer(scorer_path)
- result = acoustic_model.stt(audio_buffer)
- else:
- acoustic_model.disableExternalScorer()
- result = acoustic_model.stt(audio_buffer)
- return result
- else:
- print("Please upload an audio file with sample-rate 16000 Hz for transcription !")
-
-def stt_record(language, scorer, audio_record_buffer, state=""):
- if audio_record_buffer:
- acoustic_model, scorer_path = customization(language, scorer)
- _, y = reformat_freq(*audio_record_buffer)
- if scorer:
- acoustic_model.enableExternalScorer(scorer_path)
- result = acoustic_model.stt(y)
- else:
- acoustic_model.disableExternalScorer()
- result = acoustic_model.stt(y)
- newstate = state + result + " "
- return newstate, newstate
- else:
- print("Please record your own speech in the selected language for transcription !")
-
-upload_article = "
User guide
1. Click one row from the examples and view the results. Compare the transcription with the ground-truth text. 2. Clear the interface and upload your own audio-file in the selected language. The sampling-rate of the audio file must be 16000 Hz. 3. Submit an audio-file with or without a language model and compare the results. 4. Switch to the realtime-streaming STT tab and record your own speech. 5. Have fun !
"
-
-record_article = "
User guide
1. Record your own speech in the selected language and view the automatic streamed transcription which is updated continuously with additionalwords. 2. Stop the recording and compare the final transcription with your input. 3. Switch to the file-upload STT tab to test the examples in the different languages. 4. Have fun!"
-
-upload_inputs = [
- gr.Audio(type="filepath", label="Upload Audio"),
- gr.Radio(label="Language", choices = STT_LANGUAGES, value = "Lëtzebuergesch"),
- gr.Checkbox(label="use language model", value = True),
- gr.Textbox(label = "Speaker", visible=False),
- gr.Textbox(label = "Groundtruth", visible=False)
-]
-
-record_inputs = [
- gr.Radio(label="Language", choices = STT_LANGUAGES, value = "Lëtzebuergesch"),
- gr.Checkbox(label="use language model", value = True),
- gr.Audio(source="microphone", type="numpy", label="Record Audio", streaming=True), "state"
-]
-
-upload_outputs = gr.Textbox(lines=5, label="Transcription")
-
-record_outputs = [gr.Textbox(lines=5, label="Transcription"), "state"]
-
-upload_iface = gr.Interface(
- fn=stt_upload,
- inputs=upload_inputs,
- outputs=upload_outputs,
- title=my_title,
- description = my_description,
- article = upload_article,
- examples = EXAMPLES,
- allow_flagging = False
-)
-
-record_iface = gr.Interface(
- fn=stt_record,
- inputs=record_inputs,
- outputs=record_outputs,
- title=my_title,
- description = my_description,
- article = record_article,
- allow_flagging = False,
- live=True
-)
-
-iface = gr.TabbedInterface([upload_iface, record_iface], ["Speech-to-Text with audio-file upload", "Realtime Speech-to-Text"])
-iface.launch()
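Editor's note: `reformat_freq` above downsamples 48 kHz microphone input to the 16 kHz expected by the STT models by averaging every three samples. A quick check of that behaviour, assuming `reformat_freq` from the app above is in scope:

```python
import numpy as np

# One second of fake 48 kHz int16 audio becomes 16 000 samples at 16 kHz.
sr, y = 48000, np.random.randint(-32768, 32767, size=48000, dtype=np.int16)
new_sr, new_y = reformat_freq(sr, y)
assert new_sr == 16000
assert new_y.shape == (16000,) and new_y.dtype == np.int16
```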
diff --git a/spaces/merve/fill-in-the-blank/public/measuring-diversity/index.html b/spaces/merve/fill-in-the-blank/public/measuring-diversity/index.html
deleted file mode 100644
index 152d63d665428726e115c623d650d9ad5bef780b..0000000000000000000000000000000000000000
--- a/spaces/merve/fill-in-the-blank/public/measuring-diversity/index.html
+++ /dev/null
@@ -1,167 +0,0 @@
-
-
-
-
-
-
Search results that reflect historic inequities can amplify stereotypes and perpetuate under-representation. Carefully measuring diversity in data sets can help.
-
-
-
Search, ranking and recommendation systems can help find useful documents in large datasets. However, these datasets reflect the biases of the society in which they were created and the systems risk re-entrenching those biases. For example, if someone who is not a white man searches for “CEO pictures” and sees a page of white men, they may feel that only white men can be CEOs, further perpetuating lack of representation at companies’ executive levels.
The mathematics of all this is a little easier to follow with abstract shapes. Let’s take a look at some of them:
-
-
-
Suppose we want to return about 30% green boxes to reflect the distribution of some larger universe of shapes. Try clicking on the shapes below to select some of them — can you find a better subset to return?
-
-
-
Another diversity metric we care about is the percentage of dots… how close to 35% dots can you get?
-
-
-
If we can only return a single subset, how should we consider multiple diversity metrics? Sometimes it isn’t possible to reduce the difference of every metric to zero. One natural approach: find the selection with the lowest mean difference across all the metrics to get as close as possible to all the targets.
-
In other circumstances, like picking a panel of speakers, avoiding badly representing any single category might be more important. This can be done by finding the subset with the lowest max difference. Try minimizing both below:
-
-
-
Notice that minimizing the mean results in a different subset than minimizing the max; how else might using one over the other change the results?
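As a rough sketch of the two selection rules (with made-up shape data and helper names, not code from this piece), picking the subset with the lowest mean difference can land on a different choice than picking the one with the lowest max difference:

```python
from itertools import combinations

# Hypothetical shapes: (is_green, is_dot, is_small)
shapes = [(1, 0, 1), (0, 1, 0), (1, 1, 1), (0, 0, 1), (1, 0, 0), (0, 1, 1)]
targets = {"green": 0.30, "dots": 0.35, "small": 0.50}

def metric_diffs(subset):
    """Absolute gap between each target and the subset's actual percentage."""
    n = len(subset)
    actual = {"green": sum(s[0] for s in subset) / n,
              "dots": sum(s[1] for s in subset) / n,
              "small": sum(s[2] for s in subset) / n}
    return [abs(actual[k] - targets[k]) for k in targets]

candidates = list(combinations(shapes, 3))
best_by_mean = min(candidates, key=lambda c: sum(metric_diffs(c)) / len(targets))
best_by_max = min(candidates, key=lambda c: max(metric_diffs(c)))
print("lowest mean difference:", best_by_mean)
print("lowest max difference: ", best_by_max)
```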
-
Ranking Measures
-
We can pull out more detail by showing how the mean difference and maximum difference rank lots of sets. Below, there are 20 sets of 10 shapes sorted by the two measures. Try adjusting the target slider on the left to see how the rankings change; each set's percentages of green, dot and small shapes are shown in the small histograms.
-
-
-
At the extremes, the choice of measure can have a big impact: if we want to try to return all green results, we can shift the green target up to 100%. With this target, minimizing the max difference basically sorts the sets by the number of green items and uses the other targets as a tiebreaker. In contrast, sorting by the mean difference balances the green target more with the dot and small targets.
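To make that ranking behaviour concrete, here is a small sketch with randomly generated sets (hypothetical data, not the sets used in the visualization): with the green target pushed to 100%, ordering by max difference is dominated by each set's green count, while ordering by mean difference still trades the green gap off against the dot and small gaps:

```python
import random

random.seed(0)
# 20 hypothetical sets of 10 shapes; each shape is (is_green, is_dot, is_small)
sets = [[(random.random() < .5, random.random() < .35, random.random() < .5)
         for _ in range(10)] for _ in range(20)]
targets = {"green": 1.00, "dots": 0.35, "small": 0.50}  # green target at 100%

def diffs(shapes):
    pct = lambda i: sum(s[i] for s in shapes) / len(shapes)
    actual = {"green": pct(0), "dots": pct(1), "small": pct(2)}
    return [abs(actual[k] - targets[k]) for k in targets]

rank_by_max = sorted(range(len(sets)), key=lambda i: max(diffs(sets[i])))
rank_by_mean = sorted(range(len(sets)), key=lambda i: sum(diffs(sets[i])) / 3)
print("ranked by max difference: ", rank_by_max)
print("ranked by mean difference:", rank_by_mean)
```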
-
-
-
Beyond mean and max differences, there are more ways to combine diversity metrics, like taking the cross of two metrics to account for intersectionality. The absolute value of the difference in target and actual percentages can also be quantified in other ways — you might want to penalize undershooting more than overshooting, for example. It’s important to keep in mind what exactly you’re trying to maximize and the dataset that you’re operating on.
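For instance, a penalty that treats undershooting a target as worse than overshooting it might look something like this sketch (the weights are arbitrary placeholders):

```python
def asymmetric_diff(actual, target, under_weight=2.0, over_weight=1.0):
    """Penalize falling short of the target more heavily than exceeding it."""
    gap = actual - target
    return -gap * under_weight if gap < 0 else gap * over_weight

print(asymmetric_diff(0.20, 0.30))  # falling 10 points short costs 0.2
print(asymmetric_diff(0.40, 0.30))  # overshooting by 10 points costs only 0.1
```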
-
Which Measure is Best?
-
In a vacuum, all of these ranking methods are defensible. Picking one requires knowledge of the dataset and broader societal context.
-
For example, the doctors on the left have more variance along the shirt color attribute, but they're less diverse by gender than the doctors on the right. With the shirt color and gender targets we've picked, the two subsets have the same mean and max differences. However, in most applications, it's more important to have a representative sample of socially relevant characteristics, like gender, rather than something less salient, like clothing color.
-
-
-
Just selecting a diverse sample isn’t sufficient either. Diversity and Inclusion Metrics in Subset Selection introduces a way of measuring “inclusion” - how well does the searcher feel represented in the results?
-
Below, we have gender diversity, without inclusion for women, in the “construction worker” image domain. Masculine-presenting individuals are shown in realistic, modern construction worker situations, while feminine-presenting individuals and other gender presentations are depicted as historic nostalgia, toys, clipart, or passive.
-
-
-
The context of the query and the searcher also plays into the quality of search results. A search for “work clothing” that shows a mixed palette of colors for men’s clothing and only pink women’s clothing might make the searcher feel that women need to appear stereotypically feminine in a professional setting. But the same set of women’s clothes might be appropriate to show for a “pink women work clothes” search or if the searcher had previously expressed a preference for pink.
-
We saw how a small switch from mean to max made a huge difference in what abstract shapes are returned – and how things can get even more complex when socially salient characteristics are layered in. Defaults and small decisions can encode our priorities and values; intentionally thinking about how diversity and inclusion are being measured and which characteristics are emphasized is a step towards designing more equitable systems.
-
More Reading
-
The Diversity and Inclusion Metrics paper has a Colab with a detailed description of the metrics, additional visualizations and a reference Python implementation.
Inferring user preferences is also tricky; you can check out ways to design for user feedback and control over queries in the People + AI Guidebook.
-
Credits
-
Adam Pearce, Dylan Baker, Ellen Jiang, Meg Mitchell* and Timnit Gebru* // March 2021
-
*Work done while at Google
-
Thanks to Alex Hanna, Carey Radebaugh, Emily Denton, Fernanda Viégas, James Wexler, Jess Holbrook, Ludovic Peran, Martin Wattenberg, Michael Terry, Yannick Assogba and Zan Armstrong for their help with this piece.
-
More Explorables
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/merve/fill-in-the-blank/source/private-and-fair/accuracy-v-privacy-class.js b/spaces/merve/fill-in-the-blank/source/private-and-fair/accuracy-v-privacy-class.js
deleted file mode 100644
index 39daddb629006c967bfa8c3a6c1d43fc9887bc1b..0000000000000000000000000000000000000000
--- a/spaces/merve/fill-in-the-blank/source/private-and-fair/accuracy-v-privacy-class.js
+++ /dev/null
@@ -1,285 +0,0 @@
-var state = {
- dataset_size: 15000,
- threshold: .8,
- label: 8
-}
-
-var sel = d3.select('.accuracy-v-privacy-class').html('')
- .at({role: 'graphics-document', 'aria-label': `Line chart showing that high accuracy models can still perform poorly on some digit classes.`})
-
-async function loadData(){
- var rawData = await util.getFile(`cns-cache/grid_${state.dataset_size}trainpoints_test_labels.csv`)
-
- rawData.forEach(d => {
- delete d['']
- d.i = +d.i
- d.label = +d.label
- })
-
- var aVal2Meta = {}
- var metadata = await util.getFile('cns-cache/model_grid_test_accuracy.json')
- metadata
- .filter(d => d.dataset_size == state.dataset_size)
- .forEach(d => aVal2Meta['aVal_' + d.aVal] = d)
-
- var allCols = d3.keys(rawData[0])
- .filter(d => d.includes('aVal'))
- .map(key => {
- var {epsilon, aVal} = aVal2Meta[key]
- return {key, epsilon, aVal}
- })
-
- var byDigit = d3.nestBy(rawData, d => d.label)
- byDigit.forEach(d => {
- d.label = +d.key
- })
- byDigit.forEach(digitClass => {
- digitClass.cols = allCols.map(({key, epsilon}, colIndex) => {
- return {
- key,
- colIndex,
- epsilon,
- digitClass,
- label: digitClass.label,
- accuracy: d3.mean(digitClass, d => d[key] > state.threshold)
- }
- })
- })
-
- var data = _.flatten(byDigit.map(d => d.cols))
- .filter(d => util.epsilonExtent[1] <= d.epsilon && d.epsilon <= util.epsilonExtent[0])
- var byLabel = d3.nestBy(data, d => d.label)
- byLabel.forEach((d, i) => {
- d.label = d.key
- })
-
- return {data, byLabel}
-}
-
-
-async function initChart(){
- var {data, byLabel} = await loadData()
-
- var c = d3.conventions({
- sel: sel.append('div'),
- height: 400,
- margin: {bottom: 75, top: 5},
- layers: 'ds',
- })
-
- c.x = d3.scaleLog().domain(util.epsilonExtent).range(c.x.range())
- c.xAxis = d3.axisBottom(c.x).tickFormat(d => {
- var rv = d + ''
- if (rv.split('').filter(d => d !=0 && d != '.')[0] == 1) return rv
- })
-
- c.yAxis.tickFormat(d => d3.format('.0%')(d))//.ticks(8)
- d3.drawAxis(c)
- util.addAxisLabel(c, 'Higher Privacy →', '')
- util.ggPlotBg(c, false)
- c.layers[0].append('div')
- .st({fontSize: 12, color: '#555', width: 120*2, textAlign: 'center', lineHeight: '1.3em', verticalAlign: 'top'})
- .translate([c.width/2 - 120, c.height + 45])
- .html('in ε')
-
- var line = d3.line().x(d => c.x(d.epsilon)).y(d => c.y(d.accuracy))
-
- var lineSel = c.svg.append('g').appendMany('path.accuracy-line', byLabel)
- .at({
- d: line,
- fill: 'none',
- stroke: '#000',
- // opacity: 0,
- })
- .on('mousemove', setActiveLabel)
-
- var circleSel = c.svg.append('g')
- .appendMany('g.accuracy-circle', data)
- .translate(d => [c.x(d.epsilon), c.y(d.accuracy)])
- .on('mousemove', setActiveLabel)
- // .call(d3.attachTooltip)
-
- circleSel.append('circle')
- .at({r: 7, stroke: '#fff'})
-
- circleSel.append('text')
- .text(d => d.label)
- .at({textAnchor: 'middle', fontSize: 10, fill: '#fff', dy: '.33em'})
-
- setActiveLabel(state)
- function setActiveLabel({label}){
- lineSel
- .classed('active', 0)
- .filter(d => d.label == label)
- .classed('active', 1)
- .raise()
-
- circleSel
- .classed('active', 0)
- .filter(d => d.label == label)
- .classed('active', 1)
- .raise()
-
- state.label = label
- }
-
-
- async function updateDatasetSize(){
- var newData = await loadData()
- data = newData.data
- byLabel = newData.byLabel
-
- lineSel.data(byLabel)
- .transition()
- .at({d: line})
-
- circleSel.data(data)
- .transition()
- .translate(d => [c.x(d.epsilon), c.y(d.accuracy)])
-
- c.svg.select('text.annotation').remove()
- }
-
- function updateThreshold(){
- data.forEach(d => {
- d.accuracy = d3.mean(d.digitClass, e => e[d.key] > state.threshold)
- })
-
- lineSel.at({d: line})
- circleSel.translate(d => [c.x(d.epsilon), c.y(d.accuracy)])
-
- c.svg.select('.y .axis-label').text(`Test Points With More Than ${d3.format('.2%')(state.threshold)} Confidence In Label`)
-
- c.svg.select('text.annotation').remove()
- }
- updateThreshold()
-
- return {c, updateDatasetSize, updateThreshold}
-}
-
-
-async function init(){
- sel.append('div.chart-title').text('High accuracy models can still perform poorly on some digit classes')
-
- var chart = await initChart()
-
- var buttonRowSel = sel.append('div.button-row')
- .st({height: 50})
-
- var buttonSel = buttonRowSel.append('div')
- .st({width: 500})
- .append('span.chart-title').text('Training points')
- .parent()
- .append('div').st({display: 'inline-block', width: 300, marginLeft: 10})
- .append('div.digit-button-container.dataset_size')
- .appendMany('div.button', [2000, 3750, 7500, 15000, 30000, 60000])
- .text(d3.format(','))
- .classed('active', d => d == state.dataset_size)
- .on('click', d => {
- buttonSel.classed('active', e => e == d)
- state.dataset_size = d
- chart.updateDatasetSize()
- })
-
- buttonRowSel.append('div.conf-slider')
- .append('span.chart-title').text('Confidence threshold')
- .parent()
- .append('input.slider-native')
- .at({
- type: 'range',
- min: .0001,
- max: .9999,
- step: .0001,
- value: state.threshold,
- })
- .on('input', function(){
- state.threshold = this.value
- chart.updateThreshold()
- })
-
-
- function addSliders(){
- var width = 140
- var height = 30
- var color = '#000'
-
- var sliders = [
- {key: 'threshold', label: 'Confidence threshold', r: [.0001, .9999]},
- ]
- sliders.forEach(d => {
- d.value = state[d.key]
- d.xScale = d3.scaleLinear().range([0, width]).domain(d.r).clamp(1)
- })
-
- d3.select('.conf-slider .slider-container').remove()
- d3.select('.slider-native').remove()
-
- var svgSel = d3.select('.conf-slider').parent()
- // .st({marginTop: 5, marginBottom: 5})
- .appendMany('div.slider-container', sliders)
- .append('svg').at({width, height})
- .append('g').translate([10, 25])
-
- var sliderSel = svgSel
- .on('click', function(d){
- d.value = d.xScale.invert(d3.mouse(this)[0])
- renderSliders(d)
- })
- .classed('slider', true)
- .st({cursor: 'pointer'})
-
- var textSel = sliderSel.append('text.annotation')
- .at({y: -15, fontWeight: 300, textAnchor: 'middle', x: 180/2})
-
- sliderSel.append('rect')
- .at({width, height, y: -height/2, fill: 'rgba(0,0,0,0)'})
-
- sliderSel.append('path').at({
- d: `M 0 -.5 H ${width}`,
- stroke: color,
- strokeWidth: 1
- })
-
- var leftPathSel = sliderSel.append('path').at({
- d: `M 0 -.5 H ${width}`,
- stroke: color,
- strokeWidth: 3
- })
-
- var drag = d3.drag()
- .on('drag', function(d){
- var x = d3.mouse(this)[0]
- d.value = d.xScale.invert(x)
-
- renderSliders(d)
- })
-
- var circleSel = sliderSel.append('circle').call(drag)
- .at({r: 7, stroke: '#000'})
-
- function renderSliders(d){
- if (d) state[d.key] = d.value
-
- circleSel.at({cx: d => d.xScale(d.value)})
- leftPathSel.at({d: d => `M 0 -.5 H ${d.xScale(d.value)}`})
- textSel
- .at({x: d => d.xScale(d.value)})
- .text(d => d3.format('.2%')(d.value))
- chart.updateThreshold()
- }
- renderSliders()
- }
- addSliders()
-
-
- chart.c.svg.append('text.annotation')
- .translate([505, 212])
- .tspans(d3.wordwrap(`8s are correctly predicted with high confidence much more rarely than other digits`, 25), 12)
- .at({textAnchor: 'end'})
-
-}
-init()
-
-
-
-
diff --git a/spaces/merve/gradio-analysis-dashboard-minimal/app.py b/spaces/merve/gradio-analysis-dashboard-minimal/app.py
deleted file mode 100644
index c2319c33ab3bafac616af5f4930d25af048e6aa0..0000000000000000000000000000000000000000
--- a/spaces/merve/gradio-analysis-dashboard-minimal/app.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import gradio as gr
-import pandas as pd
-import datasets
-import seaborn as sns
-import matplotlib.pyplot as plt
-
-df = datasets.load_dataset("merve/supersoaker-failures")
-df = df["train"].to_pandas()
-df.dropna(axis=0, inplace=True)
-
-def plot(df):
-  # use a fresh figure for each chart so the saved images don't overlap
-  plt.figure()
-  plt.scatter(df.measurement_13, df.measurement_15, c=df.loading, alpha=0.5)
-  plt.savefig("scatter.png")
-  plt.figure()
-  df['failure'].value_counts().plot(kind='bar')
-  plt.savefig("bar.png")
-  plt.figure()
-  sns.heatmap(df.select_dtypes(include="number").corr())
-  plt.savefig("corr.png")
-  plots = ["corr.png", "scatter.png", "bar.png"]
-  return plots
-
-inputs = [gr.Dataframe(label="Supersoaker Production Data")]
-outputs = [gr.Gallery(label="Profiling Dashboard").style(grid=(1,3))]
-
-gr.Interface(plot, inputs=inputs, outputs=outputs, examples=[df.head(100)], title="Supersoaker Failures Analysis Dashboard").launch()
\ No newline at end of file
diff --git a/spaces/merve/uncertainty-calibration/public/data-leak/players0.js b/spaces/merve/uncertainty-calibration/public/data-leak/players0.js
deleted file mode 100644
index 5f1640268c5aa31e0ed73ec7f763b4c64d65f587..0000000000000000000000000000000000000000
--- a/spaces/merve/uncertainty-calibration/public/data-leak/players0.js
+++ /dev/null
@@ -1,456 +0,0 @@
-var players0 = [
- [
- 1.305925030229746,
- 38.016928657799276
- ],
- [
- 20.894800483675937,
- 23.071342200725514
- ],
- [
- 24.232164449818622,
- 50.35066505441355
- ],
- [
- 37.29141475211608,
- 4.643288996372431
- ],
- [
- 57.89600967351874,
- 25.24788391777509
- ],
- [
- 41.20918984280532,
- 34.389359129383315
- ],
- [
- 42.51511487303507,
- 54.26844014510278
- ],
- [
- 31.77750906892382,
- 67.9081015719468
- ],
- [
- 63.84522370012092,
- 54.41354292623942
- ],
- [
- 70.37484885126965,
- 42.22490931076179
- ],
- [
- 39.32285368802902,
- 56.44498186215236
- ],
- [
- 35.550181378476424,
- 58.91172914147521
- ],
- [
- 46.57799274486094,
- 52.8174123337364
- ],
- [
- 39.6130592503023,
- 37.14631197097945
- ],
- [
- 42.51511487303507,
- 30.90689238210399
- ],
- [
- 50.64087061668682,
- 8.706166868198308
- ],
- [
- 71.10036275695285,
- 8.996372430471585
- ],
- [
- 75.01813784764208,
- 26.844014510278114
- ],
- [
- 77.3397823458283,
- 47.44860943168077
- ],
- [
- 76.17896009673518,
- 59.34703748488513
- ],
- [
- 105.05441354292624,
- 39.177750906892385
- ],
- [
- 59.34703748488513,
- 33.083434099153564
- ]
-]
-
-
-var players1 = [
- [
- 6.819830713422007,
- 27.569528415961305
- ],
- [
- 31.05199516324063,
- 30.03627569528416
- ],
- [
- 28.440145102781138,
- 43.24062877871826
- ],
- [
- 48.02902055622733,
- 13.639661426844015
- ],
- [
- 62.249093107617895,
- 35.69528415961306
- ],
- [
- 49.915356711003625,
- 26.553808948004836
- ],
- [
- 53.68802902055623,
- 47.88391777509069
- ],
- [
- 45.85247883917775,
- 54.123337363966144
- ],
- [
- 72.8415961305925,
- 46.57799274486094
- ],
- [
- 70.81015719467956,
- 23.216444981862153
- ],
- [
- 35.98548972188634,
- 44.11124546553809
- ],
- [
- 49.48004836759371,
- 59.92744860943168
- ],
- [
- 46.86819830713422,
- 45.417170495767834
- ],
- [
- 39.6130592503023,
- 37.14631197097945
- ],
- [
- 42.37001209189843,
- 24.812575574365177
- ],
- [
- 53.252720677146314,
- 9.721886336154776
- ],
- [
- 73.5671100362757,
- 8.996372430471585
- ],
- [
- 80.96735187424426,
- 26.698911729141475
- ],
- [
- 85.75574365175332,
- 37.43651753325272
- ],
- [
- 87.35187424425635,
- 47.88391777509069
- ],
- [
- 112.59975816203143,
- 31.77750906892382
- ],
- [
- 58.041112454655384,
- 25.97339782345828
- ]
-]
-
-var players2 = [
- [
- 22.6360338573156,
- 36.27569528415961
- ],
- [
- 49.48004836759371,
- 18.71825876662636
- ],
- [
- 43.82103990326481,
- 34.82466747279323
- ],
- [
- 94.89721886336154,
- 6.674727932285369
- ],
- [
- 103.31318016928658,
- 24.522370012091898
- ],
- [
- 82.12817412333736,
- 32.0677146311971
- ],
- [
- 52.8174123337364,
- 56.009673518742446
- ],
- [
- 91.26964933494558,
- 55.28415961305925
- ],
- [
- 99.68561064087062,
- 40.33857315598549
- ],
- [
- 105.19951632406288,
- 40.33857315598549
- ],
- [
- 53.542926239419586,
- 43.966142684401454
- ],
- [
- 49.48004836759371,
- 59.92744860943168
- ],
- [
- 58.18621523579202,
- 37.87182587666263
- ],
- [
- 86.91656590084644,
- 37.58162031438936
- ],
- [
- 59.34703748488513,
- 18.137847642079805
- ],
- [
- 96.34824667472793,
- 25.24788391777509
- ],
- [
- 90.97944377267231,
- 8.996372430471585
- ],
- [
- 104.47400241837968,
- 31.342200725513905
- ],
- [
- 109.8428053204353,
- 28.295042321644498
- ],
- [
- 105.05441354292624,
- 43.24062877871826
- ],
- [
- 116.2273276904474,
- 25.538089480048367
- ],
- [
- 86.62636033857315,
- 29.165659008464328
- ]
-]
-
-
-playersleakhigh = [
- [
- 2.71764705882353,
- 22
- ],
- [
- 38.11764705882353,
- 44.75294117647059
- ],
- [
- 31.058823529411764,
- 53.22352941176471
- ],
- [
- 52.94117647058824,
- 51.10588235294118
- ],
- [
- 58.023529411764706,
- 50.11764705882353
- ],
- [
- 46.305882352941175,
- 51.247058823529414
- ],
- [
- 46.023529411764706,
- 42.635294117647064
- ],
- [
- 41.082352941176474,
- 48.98823529411765
- ],
- [
- 49.411764705882355,
- 43.76470588235294
- ],
- [
- 59.71764705882353,
- 43.48235294117647
- ],
- [
- 39.32285368802902,
- 56.44498186215236
- ],
- [
- 67.76470588235294,
- 30.494117647058825
- ],
- [
- 78.07058823529412,
- 48.28235294117647
- ],
- [
- 69.60000000000001,
- 40.23529411764706
- ],
- [
- 76.09411764705882,
- 23.152941176470588
- ],
- [
- 85.9764705882353,
- 24.282352941176473
- ],
- [
- 84.56470588235294,
- 48.98823529411765
- ],
- [
- 74.68235294117648,
- 39.38823529411765
- ],
- [
- 79.3529411764706,
- 22
- ],
- [
- 93.1764705882353,
- 34.44705882352941
- ],
- [
- 86.68235294117648,
- 33.45882352941177
- ],
- [
- 81.74117647058824,
- 41.92941176470588
- ]
-]
-
-playersleaklow = [
- [
- 2.71764705882353,
- 73.12941176470588
- ],
- [
- 38.11764705882353,
- 44.75294117647059
- ],
- [
- 31.058823529411764,
- 53.22352941176471
- ],
- [
- 52.94117647058824,
- 51.10588235294118
- ],
- [
- 58.023529411764706,
- 50.11764705882353
- ],
- [
- 46.305882352941175,
- 51.247058823529414
- ],
- [
- 46.023529411764706,
- 42.635294117647064
- ],
- [
- 41.082352941176474,
- 48.98823529411765
- ],
- [
- 49.411764705882355,
- 43.76470588235294
- ],
- [
- 59.71764705882353,
- 43.48235294117647
- ],
- [
- 39.32285368802902,
- 56.44498186215236
- ],
- [
- 67.76470588235294,
- 30.494117647058825
- ],
- [
- 78.07058823529412,
- 48.28235294117647
- ],
- [
- 69.60000000000001,
- 40.23529411764706
- ],
- [
- 76.09411764705882,
- 23.152941176470588
- ],
- [
- 85.9764705882353,
- 24.282352941176473
- ],
- [
- 84.56470588235294,
- 48.98823529411765
- ],
- [
- 74.68235294117648,
- 39.38823529411765
- ],
- [
- 79.3529411764706,
- 72.70588235294117
- ],
- [
- 93.1764705882353,
- 34.44705882352941
- ],
- [
- 86.68235294117648,
- 33.45882352941177
- ],
- [
- 81.74117647058824,
- 41.92941176470588
- ]
-]
\ No newline at end of file
diff --git a/spaces/merve/uncertainty-calibration/source/uncertainty-calibration/weatherdata.js b/spaces/merve/uncertainty-calibration/source/uncertainty-calibration/weatherdata.js
deleted file mode 100644
index 9fb29abd04cf81496773adb6fbab7a1b9cb513e0..0000000000000000000000000000000000000000
--- a/spaces/merve/uncertainty-calibration/source/uncertainty-calibration/weatherdata.js
+++ /dev/null
@@ -1,255 +0,0 @@
-var weatherdata = [{'h': 0,
-'id': 0,
-'label': 0,
-'original_score': 0.12433152687398698,
-'score': 0.12433152687398698},
-{'h': 1,
-'id': 1,
-'label': 0,
-'original_score': 0.2014203772169771,
-'score': 0.2014203772169771},
-{'h': 2,
-'id': 2,
-'label': 1,
-'original_score': 0.2626685491019668,
-'score': 0.2626685491019668},
-{'h': 3,
-'id': 3,
-'label': 0,
-'original_score': 0.10619382887946915,
-'score': 0.10619382887946915},
-{'h': 4,
-'id': 4,
-'label': 0,
-'original_score': 0.1536112957212682,
-'score': 0.1536112957212682},
-{'h': 5,
-'id': 5,
-'label': 0,
-'original_score': 0.2660219680553572,
-'score': 0.2660219680553572},
-{'h': 6,
-'id': 6,
-'label': 0,
-'original_score': 0.1886698681338711,
-'score': 0.1886698681338711},
-{'h': 7,
-'id': 7,
-'label': 0,
-'original_score': 0.302266784816097,
-'score': 0.302266784816097},
-{'h': 8,
-'id': 8,
-'label': 0,
-'original_score': 0.15496114380196338,
-'score': 0.15496114380196338},
-{'h': 9,
-'id': 9,
-'label': 0,
-'original_score': 0.19763504609985533,
-'score': 0.19763504609985533},
-{'h': 0,
-'id': 10,
-'label': 0,
-'original_score': 0.38247000184830054,
-'score': 0.38247000184830054},
-{'h': 1,
-'id': 11,
-'label': 1,
-'original_score': 0.3363518147573557,
-'score': 0.3363518147573557},
-{'h': 2,
-'id': 12,
-'label': 1,
-'original_score': 0.4947967422959128,
-'score': 0.4947967422959128},
-{'h': 3,
-'id': 13,
-'label': 0,
-'original_score': 0.38675988136018435,
-'score': 0.38675988136018435},
-{'h': 4,
-'id': 14,
-'label': 0,
-'original_score': 0.3755618748258325,
-'score': 0.3755618748258325},
-{'h': 5,
-'id': 15,
-'label': 0,
-'original_score': 0.39394252133526547,
-'score': 0.39394252133526547},
-{'h': 6,
-'id': 16,
-'label': 1,
-'original_score': 0.47996692559311144,
-'score': 0.47996692559311144},
-{'h': 7,
-'id': 17,
-'label': 0,
-'original_score': 0.4520919890835573,
-'score': 0.4520919890835573},
-{'h': 8,
-'id': 18,
-'label': 0,
-'original_score': 0.49128398887598235,
-'score': 0.49128398887598235},
-{'h': 9,
-'id': 19,
-'label': 0,
-'original_score': 0.4934231460040127,
-'score': 0.4934231460040127},
-{'h': 0,
-'id': 20,
-'label': 1,
-'original_score': 0.6023370616966761,
-'score': 0.6023370616966761},
-{'h': 1,
-'id': 21,
-'label': 0,
-'original_score': 0.5588319919664324,
-'score': 0.5588319919664324},
-{'h': 2,
-'id': 22,
-'label': 1,
-'original_score': 0.5372993269470902,
-'score': 0.5372993269470902},
-{'h': 3,
-'id': 23,
-'label': 1,
-'original_score': 0.6056881032306126,
-'score': 0.6056881032306126},
-{'h': 4,
-'id': 24,
-'label': 1,
-'original_score': 0.5777333354677878,
-'score': 0.5777333354677878},
-{'h': 5,
-'id': 25,
-'label': 0,
-'original_score': 0.5684077659316352,
-'score': 0.5684077659316352},
-{'h': 6,
-'id': 26,
-'label': 0,
-'original_score': 0.5583886351009575,
-'score': 0.5583886351009575},
-{'h': 7,
-'id': 27,
-'label': 0,
-'original_score': 0.585107016245853,
-'score': 0.585107016245853},
-{'h': 4,
-'id': 28,
-'label': 0,
-'original_score': 0.5024398267017434,
-'score': 0.5024398267017434},
-{'h': 7,
-'id': 29,
-'label': 1,
-'original_score': 0.5119051369645927,
-'score': 0.5119051369645927},
-{'h': 0,
-'id': 30,
-'label': 1,
-'original_score': 0.6874421886689279,
-'score': 0.6874421886689279},
-{'h': 1,
-'id': 31,
-'label': 1,
-'original_score': 0.7622939478182656,
-'score': 0.7622939478182656},
-{'h': 2,
-'id': 32,
-'label': 1,
-'original_score': 0.8240376576917314,
-'score': 0.8240376576917314},
-{'h': 3,
-'id': 33,
-'label': 0,
-'original_score': 0.8491598185092843,
-'score': 0.8491598185092843},
-{'h': 4,
-'id': 34,
-'label': 1,
-'original_score': 0.7585879921321647,
-'score': 0.7585879921321647},
-{'h': 5,
-'id': 35,
-'label': 0,
-'original_score': 0.76396242565466,
-'score': 0.76396242565466},
-{'h': 6,
-'id': 36,
-'label': 1,
-'original_score': 0.7498984213509621,
-'score': 0.7498984213509621},
-{'h': 7,
-'id': 37,
-'label': 1,
-'original_score': 0.6642342379293016,
-'score': 0.6642342379293016},
-{'h': 8,
-'id': 38,
-'label': 0,
-'original_score': 0.7594027841393808,
-'score': 0.7594027841393808},
-{'h': 9,
-'id': 39,
-'label': 1,
-'original_score': 0.816737760918518,
-'score': 0.816737760918518},
-{'h': 0,
-'id': 40,
-'label': 1,
-'original_score': 0.8926172493334218,
-'score': 0.8926172493334218},
-{'h': 1,
-'id': 41,
-'label': 0,
-'original_score': 0.9194132577983325,
-'score': 0.9194132577983325},
-{'h': 2,
-'id': 42,
-'label': 1,
-'original_score': 0.8603862951854552,
-'score': 0.8603862951854552},
-{'h': 3,
-'id': 43,
-'label': 1,
-'original_score': 0.9093601089110575,
-'score': 0.9093601089110575},
-{'h': 4,
-'id': 44,
-'label': 1,
-'original_score': 0.9442430043437404,
-'score': 0.9442430043437404},
-{'h': 5,
-'id': 45,
-'label': 1,
-'original_score': 0.8778942613680896,
-'score': 0.8778942613680896},
-{'h': 6,
-'id': 46,
-'label': 1,
-'original_score': 0.8873305075007553,
-'score': 0.8873305075007553},
-{'h': 7,
-'id': 47,
-'label': 1,
-'original_score': 0.8786043110234295,
-'score': 0.8786043110234295},
-{'h': 8,
-'id': 48,
-'label': 1,
-'original_score': 0.8682870444345626,
-'score': 0.8682870444345626},
-{'h': 9,
-'id': 49,
-'label': 1,
-'original_score': 0.8698959578262738,
-'score': 0.8698959578262738}]
-
-
-weatherdata.forEach(d => {
- d.is_filter = d.label && Math.random() < .6
-})
\ No newline at end of file
diff --git a/spaces/mfrashad/CharacterGAN/models/biggan/__init__.py b/spaces/mfrashad/CharacterGAN/models/biggan/__init__.py
deleted file mode 100644
index 583509736f3503bc277d5d2e2a69f445f7df8517..0000000000000000000000000000000000000000
--- a/spaces/mfrashad/CharacterGAN/models/biggan/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-from pathlib import Path
-import sys
-
-module_path = Path(__file__).parent / 'pytorch_biggan'
-sys.path.append(str(module_path.resolve()))
-from pytorch_pretrained_biggan import *
-from pytorch_pretrained_biggan.model import GenBlock
-from pytorch_pretrained_biggan.file_utils import http_get, s3_get
\ No newline at end of file
diff --git a/spaces/michael135/dontalk/run.py b/spaces/michael135/dontalk/run.py
deleted file mode 100644
index 503c6065bd96c8739fb81c94485ea2f7c86441de..0000000000000000000000000000000000000000
--- a/spaces/michael135/dontalk/run.py
+++ /dev/null
@@ -1,55 +0,0 @@
-import os
-import subprocess
-import sys
-
-
-def on_start():
- print("---------------")
- print("Running script './on_start.sh' to download models ...")
- print("---------------")
- result = subprocess.run("./on_start.sh", shell=True, env=os.environ)
- if result.returncode != 0:
- raise RuntimeError(f"Error executing ./on_start.sh [exit code: {result.returncode}]")
-
-
-def start():
- print("---------------")
- print(f"Launching {'API server' if '--nowebui' in sys.argv else 'Web UI'} with arguments: {' '.join(sys.argv[1:])}")
- print("---------------")
- import webui # type: ignore # noqa
-
- if "--nowebui" in sys.argv:
- webui.api_only()
- else:
- webui.webui()
-
-
-def set_options():
- import torch # type: ignore # noqa
-
- if not torch.cuda.is_available():
- # If no GPU is available, uninstall xformers and apply "--precision full --no-half --use-cpu all" to sys.argv.
- os.system(f"{sys.executable} -m pip uninstall -y xformers")
- sys.argv.extend(
- [
- "--precision",
- "full",
- "--no-half",
- "--use-cpu",
- "all",
- ]
- )
- else:
- # Applies "--force-enable-xformers --xformers" to sys.argv when there's a GPU present.
- sys.argv.extend(["--force-enable-xformers", "--xformers"])
-
- is_shared_ui = str(os.environ.get("IS_SHARED_UI", "") or "").strip().lower() not in ("", "0", "false", "none", "no")
- if not is_shared_ui:
- # Provide access to extensions only if IS_SHARED_UI isn't set.
- sys.argv.extend(["--enable-insecure-extension-access"])
-
-
-if __name__ == "__main__":
- set_options()
- on_start()
- start()
diff --git a/spaces/michellemli/PINNACLE/README.md b/spaces/michellemli/PINNACLE/README.md
deleted file mode 100644
index 22f53c6400aa6dc56aee7d0c023ed9caacc0d1c4..0000000000000000000000000000000000000000
--- a/spaces/michellemli/PINNACLE/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: PINNACLE
-emoji: 📚
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.36.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/mikeee/gradio-chatinterface/README.md b/spaces/mikeee/gradio-chatinterface/README.md
deleted file mode 100644
index 6fe3b56b6786f0cc7709806ef009b91a33bd6e5d..0000000000000000000000000000000000000000
--- a/spaces/mikeee/gradio-chatinterface/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: gradio-interface-tryout
-emoji: 🚀
-colorFrom: yellow
-colorTo: gray
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app-org.py
-pinned: true
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/mikeee/gradio-chatinterface/app-org.py b/spaces/mikeee/gradio-chatinterface/app-org.py
deleted file mode 100644
index a9d97fdb2fdc8072e053cdc6db85bc6abefc9fae..0000000000000000000000000000000000000000
--- a/spaces/mikeee/gradio-chatinterface/app-org.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
-Try out gradio.Chatinterface.
-
-colab gradio-chatinterface.
-
-%%writefile requirements.txt
-gradio
-transformers
-sentencepiece
-torch
-
-"""
-# pylint: disable=line-too-long, missing-module-docstring, missing-function-docstring
-# import torch
-from time import time
-
-import gradio as gr
-from about_time import about_time
-from examples_list import examples_list
-from transformers import AutoModel, AutoTokenizer # AutoModelForCausalLM,
-
-# device = "cuda" if torch.cuda.is_available() else "cpu"
-
-# tokenizer = AutoTokenizer.from_pretrained("stabilityai/StableBeluga2", use_fast=False)
-# model = AutoModelForCausalLM.from_pretrained("stabilityai/StableBeluga2", torch_dtype=torch.float16, low_cpu_mem_usage=True, device_map="auto")
-# system_prompt = "### System:\nYou are Stable Beluga, an AI that follows instructions extremely well. Help as much as you can. Remember, be safe, and don't do anything illegal.\n\n"
-# pipeline = pipeline(task="text-generation", model="meta-llama/Llama-2-7b")
-tokenizer = AutoTokenizer.from_pretrained(
- "THUDM/chatglm2-6b-int4", trust_remote_code=True
-)
-chat_model = AutoModel.from_pretrained(
- "THUDM/chatglm2-6b-int4", trust_remote_code=True # 3.92G
-).float()
-
-
-def chat(message, history):
- # prompt = f"{system_prompt}### User: {message}\n\n### Assistant:\n"
- # inputs = tokenizer(prompt, return_tensors="pt").to(device=device)
- # output = model.generate(**inputs, do_sample=True, top_p=0.95, top_k=0, max_new_tokens=256)
- # return tokenizer.decode(output[0], skip_special_tokens=True)
- flag = 1
- then = time()
- prefix = ""
- prelude = 0.0
- with about_time() as dur:
- for response, _ in chat_model.stream_chat(
- tokenizer, message, history, max_length=2048, top_p=0.7, temperature=0.95
- ):
- if flag:
- flag = 0
- prelude = time() - then
- prefix = f"{prelude:.2f}s"
- yield f"{prefix} {response}"
-        suffix = f"\n(time elapsed: {dur.duration_human}, {(time() - then - prelude)/len(response):.2f}s/char)"
- yield f"{response}{suffix}"
-
-chatbot = gr.Chatbot([], label="Bot", height=450)
-textbox = gr.Textbox('', scale=10, label='', lines=2, placeholder="Ask me anything")
-submit_btn = gr.Button(value="▶️ Send", scale=1, min_width=0, variant="primary")
-
-interf = gr.ChatInterface(
- chat,
- chatbot=chatbot,
- textbox=textbox,
- submit_btn=submit_btn,
- title="gradio-chatinterface-tryout",
- examples=examples_list,
- theme=gr.themes.Glass(text_size="sm", spacing_size="sm"),
-).queue(max_size=5)
-
-
-if __name__ == "__main__":
- interf.launch(debug=True)
diff --git a/spaces/milyiyo/reimagine-it/captioning/utils/opts.py b/spaces/milyiyo/reimagine-it/captioning/utils/opts.py
deleted file mode 100644
index 778e512361727de0939bbd7b014e6eeb716a0c67..0000000000000000000000000000000000000000
--- a/spaces/milyiyo/reimagine-it/captioning/utils/opts.py
+++ /dev/null
@@ -1,412 +0,0 @@
-from __future__ import print_function
-import argparse
-
-
-def if_use_feat(caption_model):
- # Decide if load attention feature according to caption model
- if caption_model in ['show_tell', 'all_img', 'fc', 'newfc']:
- use_att, use_fc = False, True
- elif caption_model == 'language_model':
- use_att, use_fc = False, False
- elif caption_model in ['updown', 'topdown']:
- use_fc, use_att = True, True
- else:
- use_att, use_fc = True, False
- return use_fc, use_att
-
-import pprint
-class Config(object):
- def __init__(self, **kwargs):
- """Configuration Class: set kwargs as class attributes with setattr"""
- for k, v in kwargs.items():
- setattr(self, k, v)
-
- @property
- def config_str(self):
- return pprint.pformat(self.__dict__)
-
- def __repr__(self):
- """Pretty-print configurations in alphabetical order"""
- config_str = 'Configurations\n'
- config_str += self.config_str
- return config_str
-
-
-def parse_opt(parse=True, **optional_kwargs):
- parser = argparse.ArgumentParser()
- # Data input settings
- parser.add_argument('--input_json', type=str, default='data/coco.json',
- help='path to the json file containing additional info and vocab')
- parser.add_argument('--input_fc_dir', type=str, default='data/cocotalk_fc',
- help='path to the directory containing the preprocessed fc feats')
- parser.add_argument('--input_att_dir', type=str, default='data/cocotalk_att',
- help='path to the directory containing the preprocessed att feats')
- parser.add_argument('--input_box_dir', type=str, default='data/cocotalk_box',
- help='path to the directory containing the boxes of att feats')
- parser.add_argument('--input_label_h5', type=str, default='data/coco_label.h5',
- help='path to the h5file containing the preprocessed dataset')
- parser.add_argument('--data_in_memory', action='store_true',
- help='True if we want to save the features in memory')
- parser.add_argument('--start_from', type=str, default=None,
- help="""continue training from saved model at this path. Path must contain files saved by previous training process:
- 'infos.pkl' : configuration;
- 'model.pth' : weights
- """)
- parser.add_argument('--cached_tokens', type=str, default='coco-train-idxs',
- help='Cached token file for calculating cider score during self critical training.')
-
- # Model settings
- parser.add_argument('--caption_model', type=str, default="show_tell",
- help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, att2all2, adaatt, adaattmo, updown, stackatt, denseatt, transformer')
- parser.add_argument('--rnn_size', type=int, default=512,
- help='size of the rnn in number of hidden nodes in each layer')
- parser.add_argument('--num_layers', type=int, default=1,
- help='number of layers in the RNN')
- parser.add_argument('--rnn_type', type=str, default='lstm',
- help='rnn, gru, or lstm')
- parser.add_argument('--input_encoding_size', type=int, default=512,
- help='the encoding size of each token in the vocabulary, and the image.')
- parser.add_argument('--att_hid_size', type=int, default=512,
- help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
- parser.add_argument('--fc_feat_size', type=int, default=2048,
- help='2048 for resnet, 4096 for vgg')
- parser.add_argument('--att_feat_size', type=int, default=2048,
- help='2048 for resnet, 512 for vgg')
- parser.add_argument('--logit_layers', type=int, default=1,
- help='number of layers in the RNN')
-
-
- parser.add_argument('--use_bn', type=int, default=0,
- help='If 1, then do batch_normalization first in att_embed, if 2 then do bn both in the beginning and the end of att_embed')
-
- # feature manipulation
- parser.add_argument('--norm_att_feat', type=int, default=0,
- help='If normalize attention features')
- parser.add_argument('--use_box', type=int, default=0,
- help='If use box features')
- parser.add_argument('--norm_box_feat', type=int, default=0,
- help='If use box, do we normalize box feature')
-
- # Optimization: General
- parser.add_argument('--max_epochs', type=int, default=-1,
- help='number of epochs')
- parser.add_argument('--batch_size', type=int, default=16,
- help='minibatch size')
- parser.add_argument('--grad_clip_mode', type=str, default='value',
- help='value or norm')
- parser.add_argument('--grad_clip_value', type=float, default=0.1,
- help='clip gradients at this value/max_norm, 0 means no clipping')
- parser.add_argument('--drop_prob_lm', type=float, default=0.5,
- help='strength of dropout in the Language Model RNN')
- parser.add_argument('--self_critical_after', type=int, default=-1,
- help='After what epoch do we start finetuning the CNN? (-1 = disable; never finetune, 0 = finetune from start)')
- parser.add_argument('--seq_per_img', type=int, default=5,
- help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
-
- parser.add_argument('--verbose', type=int, default=0)
-
- # Sample related
- add_eval_sample_opts(parser)
-
- #Optimization: for the Language Model
- parser.add_argument('--optim', type=str, default='adam',
- help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam|adamw')
- parser.add_argument('--learning_rate', type=float, default=4e-4,
- help='learning rate')
- parser.add_argument('--learning_rate_decay_start', type=int, default=-1,
- help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
- parser.add_argument('--learning_rate_decay_every', type=int, default=3,
- help='every how many iterations thereafter to drop LR?(in epoch)')
- parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8,
- help='every how many iterations thereafter to drop LR?(in epoch)')
- parser.add_argument('--optim_alpha', type=float, default=0.9,
- help='alpha for adam')
- parser.add_argument('--optim_beta', type=float, default=0.999,
- help='beta used for adam')
- parser.add_argument('--optim_epsilon', type=float, default=1e-8,
- help='epsilon that goes into denominator for smoothing')
- parser.add_argument('--weight_decay', type=float, default=0,
- help='weight_decay')
- # Transformer
- parser.add_argument('--label_smoothing', type=float, default=0,
- help='')
- parser.add_argument('--noamopt', action='store_true',
- help='')
- parser.add_argument('--noamopt_warmup', type=int, default=2000,
- help='')
- parser.add_argument('--noamopt_factor', type=float, default=1,
- help='')
- parser.add_argument('--reduce_on_plateau', action='store_true',
- help='')
- parser.add_argument('--reduce_on_plateau_factor', type=float, default=0.5,
- help='')
- parser.add_argument('--reduce_on_plateau_patience', type=int, default=3,
- help='')
- parser.add_argument('--cached_transformer', action='store_true',
- help='')
-
-
- parser.add_argument('--use_warmup', action='store_true',
-                        help='warm up the learning rate?')
-
- parser.add_argument('--scheduled_sampling_start', type=int, default=-1,
- help='at what iteration to start decay gt probability')
- parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5,
- help='every how many iterations thereafter to gt probability')
- parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05,
- help='How much to update the prob')
- parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25,
- help='Maximum scheduled sampling prob.')
-
-
- # Evaluation/Checkpointing
- parser.add_argument('--val_images_use', type=int, default=3200,
- help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
- parser.add_argument('--save_checkpoint_every', type=int, default=2500,
- help='how often to save a model checkpoint (in iterations)?')
- parser.add_argument('--save_every_epoch', action='store_true',
- help='Save checkpoint every epoch, will overwrite save_checkpoint_every')
- parser.add_argument('--save_history_ckpt', type=int, default=0,
- help='If save checkpoints at every save point')
- parser.add_argument('--checkpoint_path', type=str, default=None,
- help='directory to store checkpointed models')
- parser.add_argument('--language_eval', type=int, default=0,
- help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
- parser.add_argument('--losses_log_every', type=int, default=25,
- help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
- parser.add_argument('--load_best_score', type=int, default=1,
- help='Do we load previous best score when resuming training.')
-
- # misc
- parser.add_argument('--id', type=str, default='',
- help='an id identifying this run/job. used in cross-val and appended when writing progress files')
- parser.add_argument('--train_only', type=int, default=0,
- help='if true then use 80k, else use 110k')
-
-
- # Reward
- parser.add_argument('--cider_reward_weight', type=float, default=1,
- help='The reward weight from cider')
- parser.add_argument('--bleu_reward_weight', type=float, default=0,
- help='The reward weight from bleu4')
-
- # Reward
- parser.add_argument('--clipscore_reward_weight', type=float, default=1,
- help='The reward weight from clipscore')
- parser.add_argument('--use_clipscore', type=float, default=0,
- help='Use CLIPScore')
- parser.add_argument('--clipscore_mode', type=str, default='clip_s',
- help='Which CLIPScore to use: clip_s|refclip_s')
-
-
- # Structure_loss
- parser.add_argument('--structure_loss_weight', type=float, default=1,
- help='')
- parser.add_argument('--structure_after', type=int, default=-1,
- help='T')
- parser.add_argument('--structure_loss_type', type=str, default='seqnll',
- help='')
- parser.add_argument('--struc_use_logsoftmax', action='store_true', help='')
- parser.add_argument('--entropy_reward_weight', type=float, default=0,
- help='Entropy reward, seems very interesting')
- parser.add_argument('--self_cider_reward_weight', type=float, default=0,
- help='self cider reward')
-
- # Used for self critical or structure. Used when sampling is need during training
- parser.add_argument('--train_sample_n', type=int, default=16,
- help='The reward weight from cider')
- parser.add_argument('--train_sample_method', type=str, default='sample',
- help='')
- parser.add_argument('--train_beam_size', type=int, default=1,
- help='')
-
- # Used for self critical
- parser.add_argument('--sc_sample_method', type=str, default='greedy',
- help='')
- parser.add_argument('--sc_beam_size', type=int, default=1,
- help='')
-
-
- # For diversity evaluation during training
- add_diversity_opts(parser)
-
-
- # config
- parser.add_argument('--cfg', type=str, default=None,
- help='configuration; similar to what is used in detectron')
- parser.add_argument(
- '--set_cfgs', dest='set_cfgs',
-        help='Set config keys. Key value sequence separated by whitespace.'
-             'e.g. [key] [value] [key] [value]\n This has higher priority'
-             'than cfg file but lower than other args. (You can only overwrite'
-             'arguments that have already been defined in the config file.)',
- default=[], nargs='+')
- # How will config be used
- # 1) read cfg argument, and load the cfg file if it's not None
- # 2) Overwrite cfg argument with set_cfgs
- # 3) parse config argument to args.
- # 4) in the end, parse command line argument and overwrite args
-
- # step 1: read cfg_fn
- # args = parser.parse_args()
- # Parse the arguments.
- if parse:
- args = parser.parse_args()
-    # For interactive environments (e.g. jupyter)
- else:
- args = parser.parse_known_args()[0]
- # print(args)
-
- # Namespace => Dictionary
- kwargs = vars(args)
- # for k, v in optional_kwargs.items():
- # setattr(args, k, v)
- kwargs.update(optional_kwargs)
-
- args = Config(**kwargs)
-
-
- if args.cfg is not None or args.set_cfgs is not None:
- from .config import CfgNode
- if args.cfg is not None:
- # print('Read Cfg')
- cn = CfgNode(CfgNode.load_yaml_with_base(args.cfg))
- # print(cn)
- else:
- cn = CfgNode()
- if args.set_cfgs is not None:
- cn.merge_from_list(args.set_cfgs)
- for k,v in cn.items():
- if not hasattr(args, k):
- import os
- if 'LOCAL_RANK' in os.environ and os.environ['LOCAL_RANK'] != '0':
- pass
- else:
- print('Warning: key %s not in args' % k)
-
- setattr(args, k, v)
-
- if parse:
- args = parser.parse_args(namespace=args)
- else:
- args = parser.parse_known_args(namespace=args)[0]
-
- # Check if args are valid
- assert args.rnn_size > 0, "rnn_size should be greater than 0"
- assert args.num_layers > 0, "num_layers should be greater than 0"
- assert args.input_encoding_size > 0, "input_encoding_size should be greater than 0"
- assert args.batch_size > 0, "batch_size should be greater than 0"
- assert args.drop_prob_lm >= 0 and args.drop_prob_lm < 1, "drop_prob_lm should be between 0 and 1"
- assert args.seq_per_img > 0, "seq_per_img should be greater than 0"
- assert args.beam_size > 0, "beam_size should be greater than 0"
- assert args.save_checkpoint_every > 0, "save_checkpoint_every should be greater than 0"
- assert args.losses_log_every > 0, "losses_log_every should be greater than 0"
- assert args.language_eval == 0 or args.language_eval == 1, "language_eval should be 0 or 1"
-    assert args.load_best_score == 0 or args.load_best_score == 1, "load_best_score should be 0 or 1"
-    assert args.train_only == 0 or args.train_only == 1, "train_only should be 0 or 1"
-
- # default value for start_from and checkpoint_path
- args.checkpoint_path = args.checkpoint_path or './log_%s' %args.id
- args.start_from = args.start_from or args.checkpoint_path
-
- # Deal with feature things before anything
- args.use_fc, args.use_att = if_use_feat(args.caption_model)
- if args.use_box: args.att_feat_size = args.att_feat_size + 5
-
- return args
-
-
-def add_eval_options(parser):
- # Basic options
- parser.add_argument('--batch_size', type=int, default=0,
- help='if > 0 then overrule, otherwise load from checkpoint.')
- parser.add_argument('--num_images', type=int, default=-1,
- help='how many images to use when periodically evaluating the loss? (-1 = all)')
- parser.add_argument('--language_eval', type=int, default=0,
- help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
- parser.add_argument('--dump_images', type=int, default=1,
- help='Dump images into vis/imgs folder for vis? (1=yes,0=no)')
- parser.add_argument('--dump_json', type=int, default=1,
- help='Dump json with predictions into vis folder? (1=yes,0=no)')
- parser.add_argument('--dump_path', type=int, default=0,
- help='Write image paths along with predictions into vis json? (1=yes,0=no)')
-
- # Sampling options
- add_eval_sample_opts(parser)
-
- # For evaluation on a folder of images:
- parser.add_argument('--image_folder', type=str, default='',
- help='If this is nonempty then will predict on the images in this folder path')
- parser.add_argument('--image_root', type=str, default='',
- help='In case the image paths have to be preprended with a root path to an image folder')
- # For evaluation on MSCOCO images from some split:
- parser.add_argument('--input_fc_dir', type=str, default='',
- help='path to the h5file containing the preprocessed dataset')
- parser.add_argument('--input_att_dir', type=str, default='',
- help='path to the h5file containing the preprocessed dataset')
- parser.add_argument('--input_box_dir', type=str, default='',
- help='path to the h5file containing the preprocessed dataset')
- parser.add_argument('--input_label_h5', type=str, default='',
- help='path to the h5file containing the preprocessed dataset')
- parser.add_argument('--input_json', type=str, default='',
- help='path to the json file containing additional info and vocab. empty = fetch from model checkpoint.')
- parser.add_argument('--split', type=str, default='test',
- help='if running on MSCOCO images, which split to use: val|test|train')
- parser.add_argument('--coco_json', type=str, default='',
- help='if nonempty then use this file in DataLoaderRaw (see docs there). Used only in MSCOCO test evaluation, where we have a specific json file of only test set images.')
- # misc
- parser.add_argument('--id', type=str, default='',
- help='an id identifying this run/job. used only if language_eval = 1 for appending to intermediate files')
- parser.add_argument('--verbose_beam', type=int, default=1,
- help='if we need to print out all beam search beams.')
- parser.add_argument('--verbose_loss', type=int, default=0,
- help='If calculate loss using ground truth during evaluation')
-
-def add_diversity_opts(parser):
- parser.add_argument('--sample_n', type=int, default=1,
- help='Diverse sampling')
- parser.add_argument('--sample_n_method', type=str, default='sample',
- help='sample, bs, dbs, gumbel, topk, dgreedy, dsample, dtopk, dtopp')
- parser.add_argument('--eval_oracle', type=int, default=1,
- help='if we need to calculate loss.')
-
-
-# Sampling related options
-def add_eval_sample_opts(parser):
- parser.add_argument('--sample_method', type=str, default='greedy',
- help='greedy; sample; gumbel; top, top<0-1>')
- parser.add_argument('--beam_size', type=int, default=1,
- help='used when sample_method = greedy, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
- parser.add_argument('--max_length', type=int, default=20,
- help='Maximum length during sampling')
- parser.add_argument('--length_penalty', type=str, default='',
- help='wu_X or avg_X, X is the alpha')
- parser.add_argument('--group_size', type=int, default=1,
- help='used for diverse beam search. if group_size is 1, then it\'s normal beam search')
- parser.add_argument('--diversity_lambda', type=float, default=0.5,
- help='used for diverse beam search. Usually from 0.2 to 0.8. Higher value of lambda produces a more diverse list')
- parser.add_argument('--temperature', type=float, default=1.0,
- help='temperature when sampling from distributions (i.e. when sample_method = sample). Lower = "safer" predictions.')
- parser.add_argument('--decoding_constraint', type=int, default=0,
- help='If 1, not allowing same word in a row')
- parser.add_argument('--block_trigrams', type=int, default=0,
- help='block repeated trigram.')
- parser.add_argument('--remove_bad_endings', type=int, default=0,
- help='Remove bad endings')
- parser.add_argument('--suppress_UNK', type=int, default=1,
- help='Not predicting UNK')
-
-
-if __name__ == '__main__':
- import sys
- sys.argv = [sys.argv[0]]
- args = parse_opt()
- print(args)
- print()
- sys.argv = [sys.argv[0], '--cfg', 'configs/updown_long.yml']
- args1 = parse_opt()
- print(dict(set(vars(args1).items()) - set(vars(args).items())))
- print()
- sys.argv = [sys.argv[0], '--cfg', 'configs/updown_long.yml', '--caption_model', 'att2in2']
- args2 = parse_opt()
- print(dict(set(vars(args2).items()) - set(vars(args1).items())))
diff --git a/spaces/mohamedabdullah/Arabic-Spelling-Checker/app.py b/spaces/mohamedabdullah/Arabic-Spelling-Checker/app.py
deleted file mode 100644
index b898e3cf652ee2ad4fac2bbfd4c39c1d6c7f1e6b..0000000000000000000000000000000000000000
--- a/spaces/mohamedabdullah/Arabic-Spelling-Checker/app.py
+++ /dev/null
@@ -1,218 +0,0 @@
-import gradio as gr
-from datasets import load_dataset
-import re
-import numpy as np
-
-dataset = load_dataset("mohamedabdullah/Arabic-unique-words", data_files="ar_vocab.txt")
-word_l = re.findall('[^a-zA-Z0-9\s\W]{2,25}', dataset['train']['text'][0])
-vocab = set(word_l)
-
-def delete_letter(word):
- return [word[:i]+word[i+1:] for i in range(len(word))]
-
-def switch_letter(word):
- switch_l = []
-
- for i in range(len(word)-1):
- w_l = re.findall('\w', word)
- if i-1 < 0:
- w_l[i:i+2] = w_l[i+1::-1]
- else:
- w_l[i:i+2] = w_l[i+1:i-1:-1]
-
- switch_l.append(''.join(w_l))
-
- return switch_l
-
-def replace_letter(word):
- letters = 'ابتةثجحخدذرزسشصضطظعغفقكلمنهويءآأؤإئ'
-
- replace_set = set()
-
- for i in range(len(word)):
- for l in letters:
- new_word = word[:i]+l+word[i+1:]
- if new_word == word:
- continue
- replace_set.add(new_word)
-
- replace_l = sorted(list(replace_set))
-
- return replace_l
-
-def insert_letter(word):
- letters = 'ابتةثجحخدذرزسشصضطظعغفقكلمنهويءآأؤإئ'
- insert_l = []
-
- for i in range(len(word)+1):
- for l in letters:
- new_word = word[:i]+l+word[i:]
- insert_l.append(new_word)
-
- return insert_l
-
-def edit_one_letter(word, allow_switches = True):
- edit_one_set = delete_letter(word)+insert_letter(word)+replace_letter(word)
-
- if allow_switches:
- edit_one_set += switch_letter(word)
-
- return set(edit_one_set)
-
-def edit_two_letters(word, allow_switches = True):
- edit_two_set = []
- edit_one_set = edit_one_letter(word)
-
- for edit in edit_one_set:
- edit_two_set += edit_one_letter(edit)
-
- return set(edit_two_set) | set(edit_one_set)
-
-def get_corrections(word, vocab):
- suggestions = []
-
- correct_word_suggest = [word] if word in vocab else []
- edit_one_letter_suggest = list(filter(lambda item: item in vocab, list(edit_one_letter(word))))
- edit_two_letter_suggest = list(filter(lambda item: item in vocab, list(edit_two_letters(word))))
-
- suggestions = correct_word_suggest or edit_one_letter_suggest or edit_two_letter_suggest or ['لم يتم العثور علي إقتراحات مناسبة لهذه الكلمة']
-
- return set(suggestions)
-
-def min_edit_distance(source, target, ins_cost = 1, del_cost = 1, rep_cost = 2):
- m = len(source)
- n = len(target)
- D = np.zeros((m+1, n+1), dtype=int)
-
- for row in range(1, m+1):
- D[row,0] = D[row-1,0]+del_cost
-
- for col in range(1, n+1):
- D[0,col] = D[0, col-1]+ins_cost
-
- for row in range(1, m+1):
- for col in range(1, n+1):
- r_cost = rep_cost
-
- if source[row-1] == target[col-1]:
- r_cost = 0
-
- D[row,col] = np.min([D[row-1,col]+del_cost, D[row,col-1]+ins_cost, D[row-1,col-1]+r_cost])
-
- med = D[m,n]
-
- return med
-
-def get_suggestions(corrections, word):
- distance = []
- suggest = []
-
- for correction in corrections:
- source = word
- target = correction
- min_edits = min_edit_distance(source, target)
-
- distance.append(min_edits)
- suggest.append(correction)
-
- suggest_result = list(map(lambda idx: suggest[idx], np.argsort(distance)))
- return suggest_result
-
-def ar_spelling_checker(text):
- word_l = re.findall('\w{3,}', text)
- result = {}
-
- for word in word_l:
- tmp_corrections = []
- if not word in vocab:
- tmp_corrections = get_corrections(word, vocab)
- if len(tmp_corrections) == 0:
- continue
- result[word] = get_suggestions(tmp_corrections, word)
-
-    output = ''
-    # the original HTML template is elided in this diff; this is a minimal
-    # rendering of each misspelled word with its ranked suggestions
-    for word, suggestions in result.items():
-        output += '<p><b>' + word + '</b>: ' + '، '.join(suggestions) + '</p>'
-    return output
-
-with gr.Blocks() as demo:
-    gr.HTML('Web-based app to detect spelling mistakes in Arabic words using dynamic programming')
- text = gr.Textbox(label="النص", elem_id="input")
- btn = gr.Button("Spelling Check")
- output = gr.HTML()
-
- btn.click(ar_spelling_checker, [text], output)
-
-demo.launch(inline=False)
\ No newline at end of file
diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/utils.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/utils.py
deleted file mode 100644
index 168b8bf13b0e734eee3f6989ff0f28a016a09c2b..0000000000000000000000000000000000000000
--- a/spaces/mshukor/UnIVAL/fairseq/fairseq/models/speech_to_text/utils.py
+++ /dev/null
@@ -1,563 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-
-
-import logging
-from collections.abc import Iterable
-from itertools import repeat
-from typing import List, Optional, Tuple
-
-import torch
-from torch import Tensor
-
-
-# ------------------------------------------------------------------------------
-# assert_equal()
-# ------------------------------------------------------------------------------
-
-
-def assert_equal(value1, value2, name1=None, name2=None):
- """Asserts two values are equal otherwise raise an error."""
-
- str_name1 = "" if name1 is None else "{} ".format(name1)
- str_name2 = "" if name2 is None else "{} ".format(name2)
- if value1 != value2:
- str_value1 = "{}" if name1 is None else "({})"
- str_value1 = str_value1.format(value1)
- str_value2 = "{}" if name2 is None else "({})"
- str_value2 = str_value2.format(value2)
- raise ValueError(
- "Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2)
- )
-
-
-def fill_config(config, key, value):
- if value is not None:
- if key not in config or config[key] is None:
- config[key] = value
- assert_equal(value, config[key], "value", f'config["{key}"]')
-
-
-# ------------------------------------------------------------------------------
-# check_and_return_expected()
-# ------------------------------------------------------------------------------
-
-
-def check_and_return_expected(value, undefined_value, expected_value, name=None):
- """
- Return the expected value while checking if the given value is undefined or
- equal to the expected value.
- """
- if (undefined_value is None and value is None) or (undefined_value == value):
- return expected_value
- if value != expected_value:
- str_name = "" if name is None else "{} ".format(name)
- str_value = "{}" if name is None else "({})"
- str_value = str_value.format(value)
- raise ValueError(
- "Expected {}{} == {}".format(str_name, str_value, expected_value)
- )
- return expected_value
-
-
-# ------------------------------------------------------------------------------
-# get_time_axis()
-# ------------------------------------------------------------------------------
-
-
-def get_time_axis(layout):
- """
-    Extract the time axis from the layout, e.g. for breaking a sequence into
-    segments.
- """
- if layout in ["TB", "TBD"]:
- return 0
- if layout in ["BT", "BTD"]:
- return 1
- if layout in ["BCTD"]:
- return 2
- raise ValueError("Unsupported layout = {}".format(layout))
-
-
-# ------------------------------------------------------------------------------
-# get_batch_axis()
-# ------------------------------------------------------------------------------
-
-
-def get_batch_axis(layout):
- """
- Extract the batch axis from the layout
- """
- if layout in ["TB", "TBD"]:
- return 1
- if layout in ["BT", "BTD", "BCTD"]:
- return 0
- raise ValueError("Unsupported layout = {}".format(layout))
-
-
-# ------------------------------------------------------------------------------
-# monotonically_increasing_and_bounded()
-# ------------------------------------------------------------------------------
-
-
-def monotonically_increasing_and_bounded(iterable, min=None, max=None):
- """
- Check if the elements in the given iterable are monotonically increasing and
- bounded by upper/lower bounds.
- """
- if not isinstance(iterable, Iterable):
- raise TypeError(
- "Expected iterable to be of type Iterable, got ({})".format(
- iterable.__class__.__name__
- )
- )
- for i in range(len(iterable)):
- if min is not None and iterable[i] < min:
- return False
- if max is not None and iterable[i] > max:
- return False
- if i > 0 and iterable[i] <= iterable[i - 1]:
- return False
- return True
-
-
-# ------------------------------------------------------------------------------
-# to_pair()
-# ------------------------------------------------------------------------------
-
-
-def to_pair(value, name):
- """Make a pair (of type tuple) of given value."""
- if isinstance(value, Iterable):
- if len(value) != 2:
- raise ValueError(
- "Expected `{}` to have exactly 2 elements, got: ({})".format(
- name, value
- )
- )
- return value
- return tuple(repeat(value, 2))
-
-
-# ------------------------------------------------------------------------------
-# infer_conv_output_attrs()
-# ------------------------------------------------------------------------------
-
-
-# TODO(cfyeh): figure out if we can get `output_dim` without calling the module.
-def infer_conv_output_attrs(
- module, input_channels, input_dim, batch_size=1, max_length=8
-):
- """Get output attributes of a module with input."""
- input = torch.randn(batch_size, input_channels, max_length, input_dim)
- output = module(input)
- output_channels = output.shape[1]
- output_dim = output.shape[-1]
- return output_channels, output_dim
-
-
-# ------------------------------------------------------------------------------
-# NoOp
-# ------------------------------------------------------------------------------
-
-
-class NoOp(torch.nn.Module):
- """
- NoOp simply passes the input as the output.
- """
-
- def __init__(self):
- super().__init__()
-
- def forward(self, input: Tensor) -> Tensor:
- return input
-
-
-# ------------------------------------------------------------------------------
-# Permute: a torch.nn.Module applies permutation on the input tensor.
-# ------------------------------------------------------------------------------
-
-
-class Permute(torch.nn.Module):
- def __init__(self, dims):
- super().__init__()
- self.dims = dims
-
- def forward(self, input: Tensor) -> Tensor:
- return input.permute(self.dims).contiguous()
-
-
-# ------------------------------------------------------------------------------
-# lengths_to_padding_mask()
-# ------------------------------------------------------------------------------
-
-
-def lengths_to_padding_mask(lengths: Tensor) -> Tensor:
- """Convert lengths of shape (B, ) to padding mask."""
- batch_size = lengths.shape[0]
- max_length = int(torch.max(lengths).item())
- padding_mask = torch.arange( # [0, ..., T-1]
- max_length, device=lengths.device, dtype=lengths.dtype
- ).expand(batch_size, max_length) >= lengths.unsqueeze(1)
-
- return padding_mask
-
-
-# ------------------------------------------------------------------------------
-# lengths_to_attention_mask()
-# ------------------------------------------------------------------------------
-
-
-def lengths_to_attention_mask(
- lengths: Tensor,
- left_context: Optional[int] = None,
- right_context: Optional[int] = None,
-) -> Optional[Tensor]:
- """
- Generate attention mask based on (lengths, left_context, right_context).
- left_context is None means unlimited left context.
- right_context is None means unlimited right context.
- """
-
- if left_context is None and right_context is None:
- return None
-
- max_length = int(torch.max(lengths).item())
-
- # For example, with `max_length` == 5,
- # indices = tensor([
- # [ 0, 1, 2, 3, 4, 5],
- # [-1, 0, 1, 2, 3, 4],
- # [-2, -1, 0, 1, 2, 3],
- # [-3, -2, -1, 0, 1, 2],
- # [-4, -3, -2, -1, 0, 1],
- # [-5, -4, -3, -2, -1, 0],
- # ])
-
- # In some cases the second torch.arange is created on cpu which causes a
- # failure. Adding the device option to guard against it.
- indices = torch.arange(
- max_length, device=lengths.device, dtype=lengths.dtype
- ).expand(max_length, max_length) - torch.arange(
- max_length, device=lengths.device
- ).view(
- max_length, -1
- )
-
- # For example, with `max_length` == 5,
- # bool_mask = tensor([
- # [True, True, True, True, True],
- # [True, True, True, True, True],
- # [True, True, True, True, True],
- # [True, True, True, True, True],
- # [True, True, True, True, True],
- # ])
- bool_mask = (
- torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length)
- )
-
- # For example, with `max_length` == 5, left_context == 2
- # left_mask = tensor([
- # [ True, True, True, True, True],
- # [ True, True, True, True, True],
- # [ True, True, True, True, True],
- # [False, True, True, True, True],
- # [False, False, True, True, True],
- # ])
- if left_context is not None:
- left_mask = indices >= -left_context
- bool_mask = bool_mask & left_mask
-
- # For example, with `max_length` == 5, right_context == 1
- # right_mask = tensor([
- # [True, True, False, False, False],
- # [True, True, True, False, False],
- # [True, True, True, True, False],
- # [True, True, True, True, True],
- # [True, True, True, True, True],
- # ])
- if right_context is not None:
- right_mask = indices <= right_context
- bool_mask = bool_mask & right_mask
-
- bool_mask = (~bool_mask).to(device=lengths.device)
- return bool_mask
-
-
-# ------------------------------------------------------------------------------
-# infer_output_norm()
-# ------------------------------------------------------------------------------
-
-
-def infer_output_norm(module, output_norm=None):
- """
-    Infer the output norm (string and module) needed on the module given the desired
- output normalization.
- """
- if output_norm == module.output_norm():
- # output_norm already matches module.output_norm().
- return (None, NoOp())
-
- if output_norm is None and module.output_norm() is not None:
- logger = logging.getLogger("infer_output_norm()")
- logger.warning(
- "trying to set output_norm ({}) ".format(output_norm)
- + "but got module.output_norm() ({}), ".format(module.output_norm())
- + "the combined output_norm() will be ({})".format(module.output_norm())
- )
- return (None, NoOp())
-
- if output_norm == "log_softmax":
- if module.output_norm() is not None:
- raise ValueError(
- "incompatible output_norm ({}) ".format(output_norm)
- + "and module.output_norm() ({})".format(module.output_norm())
- )
- else:
- return ("log_softmax", torch.nn.LogSoftmax(dim=-1))
-
- if output_norm == "softmax":
- if module.output_norm() is not None:
- raise ValueError(
- "incompatible output_norm ({}) ".format(output_norm)
- + "and module.output_norm() ({})".format(module.output_norm())
- )
- else:
- return ("softmax", torch.nn.Softmax(dim=-1))
-
- raise ValueError(
- "output_norm ({}) not in ".format(output_norm)
- + "supported list = [None, softmax, log_softmax]"
- )
-
-
-# ------------------------------------------------------------------------------
-# infer_channels_from_layout()
-# ------------------------------------------------------------------------------
-
-
-def infer_channels_from_layout(layout, channels):
- """Extract the number of channels from the layout."""
- if layout in ("TBD", "BTD"):
- if channels is not None and channels != 1:
- raise ValueError(
- "Expected channels ({}) to be 1 for layout = {}".format(
- channels, layout
- )
- )
- if channels is None:
- return 1
- return channels
-
-
-# ------------------------------------------------------------------------------
-# pad_sequence()
-# ------------------------------------------------------------------------------
-
-
-@torch.jit.export
-def pad_sequence(
- sequence: Tensor,
- time_axis: int,
- extra_left_context: int = 0,
- extra_right_context: int = 0,
-) -> Tensor:
- """Pad extra left/right contexts to the sequence."""
-
- if extra_left_context == 0 and extra_right_context == 0:
- return sequence
-
- tensors_to_concat = []
-
- if extra_left_context:
- size = (extra_left_context,)
- fill_value = 0
- indices = torch.full(
- size=size,
- fill_value=fill_value,
- dtype=torch.long,
- device=sequence.device,
- )
- left_padding = torch.index_select(sequence, time_axis, indices)
- tensors_to_concat.append(left_padding)
-
- tensors_to_concat.append(sequence)
-
-    # NOTE(cfyeh): for efficiency reasons we pad zeros instead of repeating the
-    # last frame for the extra right context.
- if extra_right_context:
- size = list(sequence.shape)
- size[time_axis] = extra_right_context
- right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device)
- tensors_to_concat.append(right_padding)
-
- padded_sequence = torch.cat(tensors_to_concat, dim=time_axis)
- return padded_sequence
-
-
-# ------------------------------------------------------------------------------
-# sequence_to_segments()
-# ------------------------------------------------------------------------------
-
-
-@torch.jit.export
-def sequence_to_segments(
- sequence: Tensor,
- time_axis: int,
- lengths: Tensor,
- segment_size: Optional[int] = None,
- extra_left_context: int = 0,
- extra_right_context: int = 0,
-) -> List[Tuple[Tensor, Tensor]]:
- """Breaks sequence into segments."""
-
- sequence = pad_sequence(
- sequence=sequence,
- time_axis=time_axis,
- extra_left_context=extra_left_context,
- extra_right_context=extra_right_context,
- )
-
- lengths = lengths + extra_left_context + extra_right_context
-
- segments: List[Tuple[Tensor, Tensor]] = []
-
- if segment_size is None:
- segments.append((sequence, lengths))
- return segments
-
- offset = 0
- end = sequence.shape[time_axis]
- step = segment_size
- size = extra_left_context + segment_size + extra_right_context
-
- while offset + extra_left_context + extra_right_context < end:
- clamped_size = min(size, end - offset)
- segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size)
- indices = torch.arange(
- start=offset,
- end=(offset + clamped_size),
- step=1,
- dtype=torch.long,
- device=sequence.device,
- )
- segment_tensor = torch.index_select(sequence, time_axis, indices)
- segments.append((segment_tensor, segment_lengths))
- offset = offset + step
-
- return segments
-
-
-# ------------------------------------------------------------------------------
-# segments_to_sequence()
-# ------------------------------------------------------------------------------
-
-
-@torch.jit.export
-def segments_to_sequence(
- segments: List[Tuple[Tensor, Tensor]], time_axis: int
-) -> Tuple[Tensor, Tensor]:
- """Concatenate segments into a full sequence."""
- if len(segments) == 1:
- return segments[0]
-
- tensors_to_concat: List[Tensor] = []
- lengths_to_stack: List[Tensor] = []
-
- for tensor, lengths in segments:
- tensors_to_concat.append(tensor)
- lengths_to_stack.append(lengths)
-
- sequence = torch.cat(tensors_to_concat, dim=time_axis)
- lengths = torch.stack(lengths_to_stack, dim=0)
- lengths = torch.sum(lengths, dim=0)
-
- return sequence, lengths
-
-
-def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False):
- """
-    Convert lengths (a 1-D Long/Int tensor) to a 2-D binary tensor.
-
- Args:
- lengths: a (B, )-shaped tensor
- batch_first: whether to return a (B, T) tensor
-
- Return:
- max_length: maximum length of B sequences
- encoder_padding_mask: a (max_length, B) binary mask, where
- [t, b] = False for t < lengths[b] and True otherwise
-
- TODO:
- kernelize this function if benchmarking shows this function is slow
- """
- max_lengths = torch.max(lengths).item()
- bsz = lengths.size(0)
- encoder_padding_mask = torch.arange(
- max_lengths
- ).to( # a (T, ) tensor with [0, ..., T-1]
- lengths.device
- ).view( # move to the right device
- 1, max_lengths
- ).expand( # reshape to (1, T)-shaped tensor
- bsz, -1
-    ) >= lengths.view( # expand to (B, T)-shaped tensor
- bsz, 1
- ).expand(
- -1, max_lengths
- )
- if not batch_first:
- return encoder_padding_mask.t(), max_lengths
- else:
- return encoder_padding_mask, max_lengths
-
-
-# ------------------------------------------------------------------------------
-# attention suppression
-# ------------------------------------------------------------------------------
-
-
-def attention_suppression(attention_weights: Tensor, scale: float):
- # B, H, qlen, klen -> B, H, qlen, 1
- attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1)
- attention_nozeros = attention_prob.to(torch.bool)
- nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True)
-
-    # For very sparse distributions we need to work around the zero entries
-    key_sum = torch.sum(attention_prob, dim=-1, keepdim=True)
-
-    # nozeros_sum should be > 1
- key_mean = key_sum / (nozeros_sum + 1e-8)
-
- # std calculation
- dis = (attention_prob - key_mean) * (attention_prob - key_mean)
-
- # if attention_prob[i] < threshold, then dis_masked[i] = 0; for all i
- dis_masked = torch.where(
- attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size())
- )
-
- key_var = torch.sum(dis_masked, dim=-1, keepdim=True)
- key_var = key_var / (nozeros_sum - 1.0 + 1e-8)
- key_std = torch.sqrt(key_var)
- key_thread = key_mean - scale * key_std
-
-    # keep attention_weights[i] where attention_prob[i] >= key_thread,
-    # otherwise set it to "-inf"
- inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach()
- inf_tensor[:] = float("-inf")
- attention_weights_float = torch.where(
- attention_prob < key_thread,
- inf_tensor,
- attention_weights.float(),
- )
-
- return attention_weights_float.type_as(attention_weights)
-
-
-def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value):
- return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
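The padding-mask convention used by the utilities above (True marks every position at or beyond a sequence's length) is easiest to see on a small example. The sketch below reimplements lengths_to_padding_mask in isolation with made-up lengths, assuming only that PyTorch is installed:

import torch

def lengths_to_padding_mask(lengths: torch.Tensor) -> torch.Tensor:
    # True marks padded positions, i.e. every t >= lengths[b].
    batch_size = lengths.shape[0]
    max_length = int(torch.max(lengths).item())
    return torch.arange(
        max_length, device=lengths.device
    ).expand(batch_size, max_length) >= lengths.unsqueeze(1)

lengths = torch.tensor([2, 4, 3])   # made-up sequence lengths
print(lengths_to_padding_mask(lengths))
# tensor([[False, False,  True,  True],
#         [False, False, False, False],
#         [False, False, False,  True]])

The attention masks produced by lengths_to_attention_mask start from the same max_length grid, band it with the left/right context limits, and invert the result.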
diff --git a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/dist/reveal.css b/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/dist/reveal.css
deleted file mode 100644
index 5f80fd09615332cfbaa05b344e8591604e380e29..0000000000000000000000000000000000000000
--- a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/dist/reveal.css
+++ /dev/null
@@ -1,8 +0,0 @@
-/*!
-* reveal.js 4.3.1
-* https://revealjs.com
-* MIT licensed
-*
-* Copyright (C) 2011-2022 Hakim El Hattab, https://hakim.se
-*/
-.reveal .r-stretch,.reveal .stretch{max-width:none;max-height:none}.reveal pre.r-stretch code,.reveal pre.stretch code{height:100%;max-height:100%;box-sizing:border-box}.reveal .r-fit-text{display:inline-block;white-space:nowrap}.reveal .r-stack{display:grid}.reveal .r-stack>*{grid-area:1/1;margin:auto}.reveal .r-hstack,.reveal .r-vstack{display:flex}.reveal .r-hstack img,.reveal .r-hstack video,.reveal .r-vstack img,.reveal .r-vstack video{min-width:0;min-height:0;-o-object-fit:contain;object-fit:contain}.reveal .r-vstack{flex-direction:column;align-items:center;justify-content:center}.reveal .r-hstack{flex-direction:row;align-items:center;justify-content:center}.reveal .items-stretch{align-items:stretch}.reveal .items-start{align-items:flex-start}.reveal .items-center{align-items:center}.reveal .items-end{align-items:flex-end}.reveal .justify-between{justify-content:space-between}.reveal .justify-around{justify-content:space-around}.reveal .justify-start{justify-content:flex-start}.reveal .justify-center{justify-content:center}.reveal .justify-end{justify-content:flex-end}html.reveal-full-page{width:100%;height:100%;height:100vh;height:calc(var(--vh,1vh) * 100);overflow:hidden}.reveal-viewport{height:100%;overflow:hidden;position:relative;line-height:1;margin:0;background-color:#fff;color:#000}.reveal-viewport:-webkit-full-screen{top:0!important;left:0!important;width:100%!important;height:100%!important;transform:none!important}.reveal-viewport:-ms-fullscreen{top:0!important;left:0!important;width:100%!important;height:100%!important;transform:none!important}.reveal-viewport:fullscreen{top:0!important;left:0!important;width:100%!important;height:100%!important;transform:none!important}.reveal .slides section .fragment{opacity:0;visibility:hidden;transition:all .2s ease;will-change:opacity}.reveal .slides section .fragment.visible{opacity:1;visibility:inherit}.reveal .slides section .fragment.disabled{transition:none}.reveal .slides section .fragment.grow{opacity:1;visibility:inherit}.reveal .slides section .fragment.grow.visible{transform:scale(1.3)}.reveal .slides section .fragment.shrink{opacity:1;visibility:inherit}.reveal .slides section .fragment.shrink.visible{transform:scale(.7)}.reveal .slides section .fragment.zoom-in{transform:scale(.1)}.reveal .slides section .fragment.zoom-in.visible{transform:none}.reveal .slides section .fragment.fade-out{opacity:1;visibility:inherit}.reveal .slides section .fragment.fade-out.visible{opacity:0;visibility:hidden}.reveal .slides section .fragment.semi-fade-out{opacity:1;visibility:inherit}.reveal .slides section .fragment.semi-fade-out.visible{opacity:.5;visibility:inherit}.reveal .slides section .fragment.strike{opacity:1;visibility:inherit}.reveal .slides section .fragment.strike.visible{text-decoration:line-through}.reveal .slides section .fragment.fade-up{transform:translate(0,40px)}.reveal .slides section .fragment.fade-up.visible{transform:translate(0,0)}.reveal .slides section .fragment.fade-down{transform:translate(0,-40px)}.reveal .slides section .fragment.fade-down.visible{transform:translate(0,0)}.reveal .slides section .fragment.fade-right{transform:translate(-40px,0)}.reveal .slides section .fragment.fade-right.visible{transform:translate(0,0)}.reveal .slides section .fragment.fade-left{transform:translate(40px,0)}.reveal .slides section .fragment.fade-left.visible{transform:translate(0,0)}.reveal .slides section .fragment.current-visible,.reveal .slides section .fragment.fade-in-then-out{opacity:0;visibility:hidden}.reveal 
.slides section .fragment.current-visible.current-fragment,.reveal .slides section .fragment.fade-in-then-out.current-fragment{opacity:1;visibility:inherit}.reveal .slides section .fragment.fade-in-then-semi-out{opacity:0;visibility:hidden}.reveal .slides section .fragment.fade-in-then-semi-out.visible{opacity:.5;visibility:inherit}.reveal .slides section .fragment.fade-in-then-semi-out.current-fragment{opacity:1;visibility:inherit}.reveal .slides section .fragment.highlight-blue,.reveal .slides section .fragment.highlight-current-blue,.reveal .slides section .fragment.highlight-current-green,.reveal .slides section .fragment.highlight-current-red,.reveal .slides section .fragment.highlight-green,.reveal .slides section .fragment.highlight-red{opacity:1;visibility:inherit}.reveal .slides section .fragment.highlight-red.visible{color:#ff2c2d}.reveal .slides section .fragment.highlight-green.visible{color:#17ff2e}.reveal .slides section .fragment.highlight-blue.visible{color:#1b91ff}.reveal .slides section .fragment.highlight-current-red.current-fragment{color:#ff2c2d}.reveal .slides section .fragment.highlight-current-green.current-fragment{color:#17ff2e}.reveal .slides section .fragment.highlight-current-blue.current-fragment{color:#1b91ff}.reveal:after{content:"";font-style:italic}.reveal iframe{z-index:1}.reveal a{position:relative}@keyframes bounce-right{0%,10%,25%,40%,50%{transform:translateX(0)}20%{transform:translateX(10px)}30%{transform:translateX(-5px)}}@keyframes bounce-left{0%,10%,25%,40%,50%{transform:translateX(0)}20%{transform:translateX(-10px)}30%{transform:translateX(5px)}}@keyframes bounce-down{0%,10%,25%,40%,50%{transform:translateY(0)}20%{transform:translateY(10px)}30%{transform:translateY(-5px)}}.reveal .controls{display:none;position:absolute;top:auto;bottom:12px;right:12px;left:auto;z-index:11;color:#000;pointer-events:none;font-size:10px}.reveal .controls button{position:absolute;padding:0;background-color:transparent;border:0;outline:0;cursor:pointer;color:currentColor;transform:scale(.9999);transition:color .2s ease,opacity .2s ease,transform .2s ease;z-index:2;pointer-events:auto;font-size:inherit;visibility:hidden;opacity:0;-webkit-appearance:none;-webkit-tap-highlight-color:transparent}.reveal .controls .controls-arrow:after,.reveal .controls .controls-arrow:before{content:"";position:absolute;top:0;left:0;width:2.6em;height:.5em;border-radius:.25em;background-color:currentColor;transition:all .15s ease,background-color .8s ease;transform-origin:.2em 50%;will-change:transform}.reveal .controls .controls-arrow{position:relative;width:3.6em;height:3.6em}.reveal .controls .controls-arrow:before{transform:translateX(.5em) translateY(1.55em) rotate(45deg)}.reveal .controls .controls-arrow:after{transform:translateX(.5em) translateY(1.55em) rotate(-45deg)}.reveal .controls .controls-arrow:hover:before{transform:translateX(.5em) translateY(1.55em) rotate(40deg)}.reveal .controls .controls-arrow:hover:after{transform:translateX(.5em) translateY(1.55em) rotate(-40deg)}.reveal .controls .controls-arrow:active:before{transform:translateX(.5em) translateY(1.55em) rotate(36deg)}.reveal .controls .controls-arrow:active:after{transform:translateX(.5em) translateY(1.55em) rotate(-36deg)}.reveal .controls .navigate-left{right:6.4em;bottom:3.2em;transform:translateX(-10px)}.reveal .controls .navigate-left.highlight{animation:bounce-left 2s 50 both ease-out}.reveal .controls .navigate-right{right:0;bottom:3.2em;transform:translateX(10px)}.reveal .controls .navigate-right 
.controls-arrow{transform:rotate(180deg)}.reveal .controls .navigate-right.highlight{animation:bounce-right 2s 50 both ease-out}.reveal .controls .navigate-up{right:3.2em;bottom:6.4em;transform:translateY(-10px)}.reveal .controls .navigate-up .controls-arrow{transform:rotate(90deg)}.reveal .controls .navigate-down{right:3.2em;bottom:-1.4em;padding-bottom:1.4em;transform:translateY(10px)}.reveal .controls .navigate-down .controls-arrow{transform:rotate(-90deg)}.reveal .controls .navigate-down.highlight{animation:bounce-down 2s 50 both ease-out}.reveal .controls[data-controls-back-arrows=faded] .navigate-up.enabled{opacity:.3}.reveal .controls[data-controls-back-arrows=faded] .navigate-up.enabled:hover{opacity:1}.reveal .controls[data-controls-back-arrows=hidden] .navigate-up.enabled{opacity:0;visibility:hidden}.reveal .controls .enabled{visibility:visible;opacity:.9;cursor:pointer;transform:none}.reveal .controls .enabled.fragmented{opacity:.5}.reveal .controls .enabled.fragmented:hover,.reveal .controls .enabled:hover{opacity:1}.reveal:not(.rtl) .controls[data-controls-back-arrows=faded] .navigate-left.enabled{opacity:.3}.reveal:not(.rtl) .controls[data-controls-back-arrows=faded] .navigate-left.enabled:hover{opacity:1}.reveal:not(.rtl) .controls[data-controls-back-arrows=hidden] .navigate-left.enabled{opacity:0;visibility:hidden}.reveal.rtl .controls[data-controls-back-arrows=faded] .navigate-right.enabled{opacity:.3}.reveal.rtl .controls[data-controls-back-arrows=faded] .navigate-right.enabled:hover{opacity:1}.reveal.rtl .controls[data-controls-back-arrows=hidden] .navigate-right.enabled{opacity:0;visibility:hidden}.reveal[data-navigation-mode=linear].has-horizontal-slides .navigate-down,.reveal[data-navigation-mode=linear].has-horizontal-slides .navigate-up{display:none}.reveal:not(.has-vertical-slides) .controls .navigate-left,.reveal[data-navigation-mode=linear].has-horizontal-slides .navigate-left{bottom:1.4em;right:5.5em}.reveal:not(.has-vertical-slides) .controls .navigate-right,.reveal[data-navigation-mode=linear].has-horizontal-slides .navigate-right{bottom:1.4em;right:.5em}.reveal:not(.has-horizontal-slides) .controls .navigate-up{right:1.4em;bottom:5em}.reveal:not(.has-horizontal-slides) .controls .navigate-down{right:1.4em;bottom:.5em}.reveal.has-dark-background .controls{color:#fff}.reveal.has-light-background .controls{color:#000}.reveal.no-hover .controls .controls-arrow:active:before,.reveal.no-hover .controls .controls-arrow:hover:before{transform:translateX(.5em) translateY(1.55em) rotate(45deg)}.reveal.no-hover .controls .controls-arrow:active:after,.reveal.no-hover .controls .controls-arrow:hover:after{transform:translateX(.5em) translateY(1.55em) rotate(-45deg)}@media screen and (min-width:500px){.reveal .controls[data-controls-layout=edges]{top:0;right:0;bottom:0;left:0}.reveal .controls[data-controls-layout=edges] .navigate-down,.reveal .controls[data-controls-layout=edges] .navigate-left,.reveal .controls[data-controls-layout=edges] .navigate-right,.reveal .controls[data-controls-layout=edges] .navigate-up{bottom:auto;right:auto}.reveal .controls[data-controls-layout=edges] .navigate-left{top:50%;left:.8em;margin-top:-1.8em}.reveal .controls[data-controls-layout=edges] .navigate-right{top:50%;right:.8em;margin-top:-1.8em}.reveal .controls[data-controls-layout=edges] .navigate-up{top:.8em;left:50%;margin-left:-1.8em}.reveal .controls[data-controls-layout=edges] .navigate-down{bottom:-.3em;left:50%;margin-left:-1.8em}}.reveal 
.progress{position:absolute;display:none;height:3px;width:100%;bottom:0;left:0;z-index:10;background-color:rgba(0,0,0,.2);color:#fff}.reveal .progress:after{content:"";display:block;position:absolute;height:10px;width:100%;top:-10px}.reveal .progress span{display:block;height:100%;width:100%;background-color:currentColor;transition:transform .8s cubic-bezier(.26,.86,.44,.985);transform-origin:0 0;transform:scaleX(0)}.reveal .slide-number{position:absolute;display:block;right:8px;bottom:8px;z-index:31;font-family:Helvetica,sans-serif;font-size:12px;line-height:1;color:#fff;background-color:rgba(0,0,0,.4);padding:5px}.reveal .slide-number a{color:currentColor}.reveal .slide-number-delimiter{margin:0 3px}.reveal{position:relative;width:100%;height:100%;overflow:hidden;touch-action:pinch-zoom}.reveal.embedded{touch-action:pan-y}.reveal .slides{position:absolute;width:100%;height:100%;top:0;right:0;bottom:0;left:0;margin:auto;pointer-events:none;overflow:visible;z-index:1;text-align:center;perspective:600px;perspective-origin:50% 40%}.reveal .slides>section{perspective:600px}.reveal .slides>section,.reveal .slides>section>section{display:none;position:absolute;width:100%;pointer-events:auto;z-index:10;transform-style:flat;transition:transform-origin .8s cubic-bezier(.26,.86,.44,.985),transform .8s cubic-bezier(.26,.86,.44,.985),visibility .8s cubic-bezier(.26,.86,.44,.985),opacity .8s cubic-bezier(.26,.86,.44,.985)}.reveal[data-transition-speed=fast] .slides section{transition-duration:.4s}.reveal[data-transition-speed=slow] .slides section{transition-duration:1.2s}.reveal .slides section[data-transition-speed=fast]{transition-duration:.4s}.reveal .slides section[data-transition-speed=slow]{transition-duration:1.2s}.reveal .slides>section.stack{padding-top:0;padding-bottom:0;pointer-events:none;height:100%}.reveal .slides>section.present,.reveal .slides>section>section.present{display:block;z-index:11;opacity:1}.reveal .slides>section:empty,.reveal .slides>section>section:empty,.reveal .slides>section>section[data-background-interactive],.reveal .slides>section[data-background-interactive]{pointer-events:none}.reveal.center,.reveal.center .slides,.reveal.center .slides section{min-height:0!important}.reveal .slides>section:not(.present),.reveal .slides>section>section:not(.present){pointer-events:none}.reveal.overview .slides>section,.reveal.overview .slides>section>section{pointer-events:auto}.reveal .slides>section.future,.reveal .slides>section.past,.reveal .slides>section>section.future,.reveal .slides>section>section.past{opacity:0}.reveal .slides>section[data-transition=slide].past,.reveal .slides>section[data-transition~=slide-out].past,.reveal.slide .slides>section:not([data-transition]).past{transform:translate(-150%,0)}.reveal .slides>section[data-transition=slide].future,.reveal .slides>section[data-transition~=slide-in].future,.reveal.slide .slides>section:not([data-transition]).future{transform:translate(150%,0)}.reveal .slides>section>section[data-transition=slide].past,.reveal .slides>section>section[data-transition~=slide-out].past,.reveal.slide .slides>section>section:not([data-transition]).past{transform:translate(0,-150%)}.reveal .slides>section>section[data-transition=slide].future,.reveal .slides>section>section[data-transition~=slide-in].future,.reveal.slide .slides>section>section:not([data-transition]).future{transform:translate(0,150%)}.reveal .slides>section[data-transition=linear].past,.reveal .slides>section[data-transition~=linear-out].past,.reveal.linear 
.slides>section:not([data-transition]).past{transform:translate(-150%,0)}.reveal .slides>section[data-transition=linear].future,.reveal .slides>section[data-transition~=linear-in].future,.reveal.linear .slides>section:not([data-transition]).future{transform:translate(150%,0)}.reveal .slides>section>section[data-transition=linear].past,.reveal .slides>section>section[data-transition~=linear-out].past,.reveal.linear .slides>section>section:not([data-transition]).past{transform:translate(0,-150%)}.reveal .slides>section>section[data-transition=linear].future,.reveal .slides>section>section[data-transition~=linear-in].future,.reveal.linear .slides>section>section:not([data-transition]).future{transform:translate(0,150%)}.reveal .slides section[data-transition=default].stack,.reveal.default .slides section.stack{transform-style:preserve-3d}.reveal .slides>section[data-transition=default].past,.reveal .slides>section[data-transition~=default-out].past,.reveal.default .slides>section:not([data-transition]).past{transform:translate3d(-100%,0,0) rotateY(-90deg) translate3d(-100%,0,0)}.reveal .slides>section[data-transition=default].future,.reveal .slides>section[data-transition~=default-in].future,.reveal.default .slides>section:not([data-transition]).future{transform:translate3d(100%,0,0) rotateY(90deg) translate3d(100%,0,0)}.reveal .slides>section>section[data-transition=default].past,.reveal .slides>section>section[data-transition~=default-out].past,.reveal.default .slides>section>section:not([data-transition]).past{transform:translate3d(0,-300px,0) rotateX(70deg) translate3d(0,-300px,0)}.reveal .slides>section>section[data-transition=default].future,.reveal .slides>section>section[data-transition~=default-in].future,.reveal.default .slides>section>section:not([data-transition]).future{transform:translate3d(0,300px,0) rotateX(-70deg) translate3d(0,300px,0)}.reveal .slides section[data-transition=convex].stack,.reveal.convex .slides section.stack{transform-style:preserve-3d}.reveal .slides>section[data-transition=convex].past,.reveal .slides>section[data-transition~=convex-out].past,.reveal.convex .slides>section:not([data-transition]).past{transform:translate3d(-100%,0,0) rotateY(-90deg) translate3d(-100%,0,0)}.reveal .slides>section[data-transition=convex].future,.reveal .slides>section[data-transition~=convex-in].future,.reveal.convex .slides>section:not([data-transition]).future{transform:translate3d(100%,0,0) rotateY(90deg) translate3d(100%,0,0)}.reveal .slides>section>section[data-transition=convex].past,.reveal .slides>section>section[data-transition~=convex-out].past,.reveal.convex .slides>section>section:not([data-transition]).past{transform:translate3d(0,-300px,0) rotateX(70deg) translate3d(0,-300px,0)}.reveal .slides>section>section[data-transition=convex].future,.reveal .slides>section>section[data-transition~=convex-in].future,.reveal.convex .slides>section>section:not([data-transition]).future{transform:translate3d(0,300px,0) rotateX(-70deg) translate3d(0,300px,0)}.reveal .slides section[data-transition=concave].stack,.reveal.concave .slides section.stack{transform-style:preserve-3d}.reveal .slides>section[data-transition=concave].past,.reveal .slides>section[data-transition~=concave-out].past,.reveal.concave .slides>section:not([data-transition]).past{transform:translate3d(-100%,0,0) rotateY(90deg) translate3d(-100%,0,0)}.reveal .slides>section[data-transition=concave].future,.reveal .slides>section[data-transition~=concave-in].future,.reveal.concave 
.slides>section:not([data-transition]).future{transform:translate3d(100%,0,0) rotateY(-90deg) translate3d(100%,0,0)}.reveal .slides>section>section[data-transition=concave].past,.reveal .slides>section>section[data-transition~=concave-out].past,.reveal.concave .slides>section>section:not([data-transition]).past{transform:translate3d(0,-80%,0) rotateX(-70deg) translate3d(0,-80%,0)}.reveal .slides>section>section[data-transition=concave].future,.reveal .slides>section>section[data-transition~=concave-in].future,.reveal.concave .slides>section>section:not([data-transition]).future{transform:translate3d(0,80%,0) rotateX(70deg) translate3d(0,80%,0)}.reveal .slides section[data-transition=zoom],.reveal.zoom .slides section:not([data-transition]){transition-timing-function:ease}.reveal .slides>section[data-transition=zoom].past,.reveal .slides>section[data-transition~=zoom-out].past,.reveal.zoom .slides>section:not([data-transition]).past{visibility:hidden;transform:scale(16)}.reveal .slides>section[data-transition=zoom].future,.reveal .slides>section[data-transition~=zoom-in].future,.reveal.zoom .slides>section:not([data-transition]).future{visibility:hidden;transform:scale(.2)}.reveal .slides>section>section[data-transition=zoom].past,.reveal .slides>section>section[data-transition~=zoom-out].past,.reveal.zoom .slides>section>section:not([data-transition]).past{transform:scale(16)}.reveal .slides>section>section[data-transition=zoom].future,.reveal .slides>section>section[data-transition~=zoom-in].future,.reveal.zoom .slides>section>section:not([data-transition]).future{transform:scale(.2)}.reveal.cube .slides{perspective:1300px}.reveal.cube .slides section{padding:30px;min-height:700px;-webkit-backface-visibility:hidden;backface-visibility:hidden;box-sizing:border-box;transform-style:preserve-3d}.reveal.center.cube .slides section{min-height:0}.reveal.cube .slides section:not(.stack):before{content:"";position:absolute;display:block;width:100%;height:100%;left:0;top:0;background:rgba(0,0,0,.1);border-radius:4px;transform:translateZ(-20px)}.reveal.cube .slides section:not(.stack):after{content:"";position:absolute;display:block;width:90%;height:30px;left:5%;bottom:0;background:0 0;z-index:1;border-radius:4px;box-shadow:0 95px 25px rgba(0,0,0,.2);transform:translateZ(-90px) rotateX(65deg)}.reveal.cube .slides>section.stack{padding:0;background:0 0}.reveal.cube .slides>section.past{transform-origin:100% 0;transform:translate3d(-100%,0,0) rotateY(-90deg)}.reveal.cube .slides>section.future{transform-origin:0 0;transform:translate3d(100%,0,0) rotateY(90deg)}.reveal.cube .slides>section>section.past{transform-origin:0 100%;transform:translate3d(0,-100%,0) rotateX(90deg)}.reveal.cube .slides>section>section.future{transform-origin:0 0;transform:translate3d(0,100%,0) rotateX(-90deg)}.reveal.page .slides{perspective-origin:0 50%;perspective:3000px}.reveal.page .slides section{padding:30px;min-height:700px;box-sizing:border-box;transform-style:preserve-3d}.reveal.page .slides section.past{z-index:12}.reveal.page .slides section:not(.stack):before{content:"";position:absolute;display:block;width:100%;height:100%;left:0;top:0;background:rgba(0,0,0,.1);transform:translateZ(-20px)}.reveal.page .slides section:not(.stack):after{content:"";position:absolute;display:block;width:90%;height:30px;left:5%;bottom:0;background:0 0;z-index:1;border-radius:4px;box-shadow:0 95px 25px rgba(0,0,0,.2);-webkit-transform:translateZ(-90px) rotateX(65deg)}.reveal.page .slides>section.stack{padding:0;background:0 
0}.reveal.page .slides>section.past{transform-origin:0 0;transform:translate3d(-40%,0,0) rotateY(-80deg)}.reveal.page .slides>section.future{transform-origin:100% 0;transform:translate3d(0,0,0)}.reveal.page .slides>section>section.past{transform-origin:0 0;transform:translate3d(0,-40%,0) rotateX(80deg)}.reveal.page .slides>section>section.future{transform-origin:0 100%;transform:translate3d(0,0,0)}.reveal .slides section[data-transition=fade],.reveal.fade .slides section:not([data-transition]),.reveal.fade .slides>section>section:not([data-transition]){transform:none;transition:opacity .5s}.reveal.fade.overview .slides section,.reveal.fade.overview .slides>section>section{transition:none}.reveal .slides section[data-transition=none],.reveal.none .slides section:not([data-transition]){transform:none;transition:none}.reveal .pause-overlay{position:absolute;top:0;left:0;width:100%;height:100%;background:#000;visibility:hidden;opacity:0;z-index:100;transition:all 1s ease}.reveal .pause-overlay .resume-button{position:absolute;bottom:20px;right:20px;color:#ccc;border-radius:2px;padding:6px 14px;border:2px solid #ccc;font-size:16px;background:0 0;cursor:pointer}.reveal .pause-overlay .resume-button:hover{color:#fff;border-color:#fff}.reveal.paused .pause-overlay{visibility:visible;opacity:1}.reveal .no-transition,.reveal .no-transition *,.reveal .slides.disable-slide-transitions section{transition:none!important}.reveal .slides.disable-slide-transitions section{transform:none!important}.reveal .backgrounds{position:absolute;width:100%;height:100%;top:0;left:0;perspective:600px}.reveal .slide-background{display:none;position:absolute;width:100%;height:100%;opacity:0;visibility:hidden;overflow:hidden;background-color:rgba(0,0,0,0);transition:all .8s cubic-bezier(.26,.86,.44,.985)}.reveal .slide-background-content{position:absolute;width:100%;height:100%;background-position:50% 50%;background-repeat:no-repeat;background-size:cover}.reveal .slide-background.stack{display:block}.reveal .slide-background.present{opacity:1;visibility:visible;z-index:2}.print-pdf .reveal .slide-background{opacity:1!important;visibility:visible!important}.reveal .slide-background video{position:absolute;width:100%;height:100%;max-width:none;max-height:none;top:0;left:0;-o-object-fit:cover;object-fit:cover}.reveal .slide-background[data-background-size=contain] video{-o-object-fit:contain;object-fit:contain}.reveal>.backgrounds .slide-background[data-background-transition=none],.reveal[data-background-transition=none]>.backgrounds .slide-background:not([data-background-transition]){transition:none}.reveal>.backgrounds .slide-background[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background:not([data-background-transition]){opacity:1}.reveal>.backgrounds .slide-background.past[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background.past:not([data-background-transition]){transform:translate(-100%,0)}.reveal>.backgrounds .slide-background.future[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background.future:not([data-background-transition]){transform:translate(100%,0)}.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]){transform:translate(0,-100%)}.reveal>.backgrounds 
.slide-background>.slide-background.future[data-background-transition=slide],.reveal[data-background-transition=slide]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]){transform:translate(0,100%)}.reveal>.backgrounds .slide-background.past[data-background-transition=convex],.reveal[data-background-transition=convex]>.backgrounds .slide-background.past:not([data-background-transition]){opacity:0;transform:translate3d(-100%,0,0) rotateY(-90deg) translate3d(-100%,0,0)}.reveal>.backgrounds .slide-background.future[data-background-transition=convex],.reveal[data-background-transition=convex]>.backgrounds .slide-background.future:not([data-background-transition]){opacity:0;transform:translate3d(100%,0,0) rotateY(90deg) translate3d(100%,0,0)}.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=convex],.reveal[data-background-transition=convex]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]){opacity:0;transform:translate3d(0,-100%,0) rotateX(90deg) translate3d(0,-100%,0)}.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=convex],.reveal[data-background-transition=convex]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]){opacity:0;transform:translate3d(0,100%,0) rotateX(-90deg) translate3d(0,100%,0)}.reveal>.backgrounds .slide-background.past[data-background-transition=concave],.reveal[data-background-transition=concave]>.backgrounds .slide-background.past:not([data-background-transition]){opacity:0;transform:translate3d(-100%,0,0) rotateY(90deg) translate3d(-100%,0,0)}.reveal>.backgrounds .slide-background.future[data-background-transition=concave],.reveal[data-background-transition=concave]>.backgrounds .slide-background.future:not([data-background-transition]){opacity:0;transform:translate3d(100%,0,0) rotateY(-90deg) translate3d(100%,0,0)}.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=concave],.reveal[data-background-transition=concave]>.backgrounds .slide-background>.slide-background.past:not([data-background-transition]){opacity:0;transform:translate3d(0,-100%,0) rotateX(-90deg) translate3d(0,-100%,0)}.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=concave],.reveal[data-background-transition=concave]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]){opacity:0;transform:translate3d(0,100%,0) rotateX(90deg) translate3d(0,100%,0)}.reveal>.backgrounds .slide-background[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds .slide-background:not([data-background-transition]){transition-timing-function:ease}.reveal>.backgrounds .slide-background.past[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds .slide-background.past:not([data-background-transition]){opacity:0;visibility:hidden;transform:scale(16)}.reveal>.backgrounds .slide-background.future[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds .slide-background.future:not([data-background-transition]){opacity:0;visibility:hidden;transform:scale(.2)}.reveal>.backgrounds .slide-background>.slide-background.past[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds 
.slide-background>.slide-background.past:not([data-background-transition]){opacity:0;visibility:hidden;transform:scale(16)}.reveal>.backgrounds .slide-background>.slide-background.future[data-background-transition=zoom],.reveal[data-background-transition=zoom]>.backgrounds .slide-background>.slide-background.future:not([data-background-transition]){opacity:0;visibility:hidden;transform:scale(.2)}.reveal[data-transition-speed=fast]>.backgrounds .slide-background{transition-duration:.4s}.reveal[data-transition-speed=slow]>.backgrounds .slide-background{transition-duration:1.2s}.reveal [data-auto-animate-target^=unmatched]{will-change:opacity}.reveal section[data-auto-animate]:not(.stack):not([data-auto-animate=running]) [data-auto-animate-target^=unmatched]{opacity:0}.reveal.overview{perspective-origin:50% 50%;perspective:700px}.reveal.overview .slides{-moz-transform-style:preserve-3d}.reveal.overview .slides section{height:100%;top:0!important;opacity:1!important;overflow:hidden;visibility:visible!important;cursor:pointer;box-sizing:border-box}.reveal.overview .slides section.present,.reveal.overview .slides section:hover{outline:10px solid rgba(150,150,150,.4);outline-offset:10px}.reveal.overview .slides section .fragment{opacity:1;transition:none}.reveal.overview .slides section:after,.reveal.overview .slides section:before{display:none!important}.reveal.overview .slides>section.stack{padding:0;top:0!important;background:0 0;outline:0;overflow:visible}.reveal.overview .backgrounds{perspective:inherit;-moz-transform-style:preserve-3d}.reveal.overview .backgrounds .slide-background{opacity:1;visibility:visible;outline:10px solid rgba(150,150,150,.1);outline-offset:10px}.reveal.overview .backgrounds .slide-background.stack{overflow:visible}.reveal.overview .slides section,.reveal.overview-deactivating .slides section{transition:none}.reveal.overview .backgrounds .slide-background,.reveal.overview-deactivating .backgrounds .slide-background{transition:none}.reveal.rtl .slides,.reveal.rtl .slides h1,.reveal.rtl .slides h2,.reveal.rtl .slides h3,.reveal.rtl .slides h4,.reveal.rtl .slides h5,.reveal.rtl .slides h6{direction:rtl;font-family:sans-serif}.reveal.rtl code,.reveal.rtl pre{direction:ltr}.reveal.rtl ol,.reveal.rtl ul{text-align:right}.reveal.rtl .progress span{transform-origin:100% 0}.reveal.has-parallax-background .backgrounds{transition:all .8s ease}.reveal.has-parallax-background[data-transition-speed=fast] .backgrounds{transition-duration:.4s}.reveal.has-parallax-background[data-transition-speed=slow] .backgrounds{transition-duration:1.2s}.reveal>.overlay{position:absolute;top:0;left:0;width:100%;height:100%;z-index:1000;background:rgba(0,0,0,.9);transition:all .3s ease}.reveal>.overlay .spinner{position:absolute;display:block;top:50%;left:50%;width:32px;height:32px;margin:-16px 0 0 
-16px;z-index:10;background-image:url(data:image/gif;base64,R0lGODlhIAAgAPMAAJmZmf%2F%2F%2F6%2Bvr8nJybW1tcDAwOjo6Nvb26ioqKOjo7Ozs%2FLy8vz8%2FAAAAAAAAAAAACH%2FC05FVFNDQVBFMi4wAwEAAAAh%2FhpDcmVhdGVkIHdpdGggYWpheGxvYWQuaW5mbwAh%2BQQJCgAAACwAAAAAIAAgAAAE5xDISWlhperN52JLhSSdRgwVo1ICQZRUsiwHpTJT4iowNS8vyW2icCF6k8HMMBkCEDskxTBDAZwuAkkqIfxIQyhBQBFvAQSDITM5VDW6XNE4KagNh6Bgwe60smQUB3d4Rz1ZBApnFASDd0hihh12BkE9kjAJVlycXIg7CQIFA6SlnJ87paqbSKiKoqusnbMdmDC2tXQlkUhziYtyWTxIfy6BE8WJt5YJvpJivxNaGmLHT0VnOgSYf0dZXS7APdpB309RnHOG5gDqXGLDaC457D1zZ%2FV%2FnmOM82XiHRLYKhKP1oZmADdEAAAh%2BQQJCgAAACwAAAAAIAAgAAAE6hDISWlZpOrNp1lGNRSdRpDUolIGw5RUYhhHukqFu8DsrEyqnWThGvAmhVlteBvojpTDDBUEIFwMFBRAmBkSgOrBFZogCASwBDEY%2FCZSg7GSE0gSCjQBMVG023xWBhklAnoEdhQEfyNqMIcKjhRsjEdnezB%2BA4k8gTwJhFuiW4dokXiloUepBAp5qaKpp6%2BHo7aWW54wl7obvEe0kRuoplCGepwSx2jJvqHEmGt6whJpGpfJCHmOoNHKaHx61WiSR92E4lbFoq%2BB6QDtuetcaBPnW6%2BO7wDHpIiK9SaVK5GgV543tzjgGcghAgAh%2BQQJCgAAACwAAAAAIAAgAAAE7hDISSkxpOrN5zFHNWRdhSiVoVLHspRUMoyUakyEe8PTPCATW9A14E0UvuAKMNAZKYUZCiBMuBakSQKG8G2FzUWox2AUtAQFcBKlVQoLgQReZhQlCIJesQXI5B0CBnUMOxMCenoCfTCEWBsJColTMANldx15BGs8B5wlCZ9Po6OJkwmRpnqkqnuSrayqfKmqpLajoiW5HJq7FL1Gr2mMMcKUMIiJgIemy7xZtJsTmsM4xHiKv5KMCXqfyUCJEonXPN2rAOIAmsfB3uPoAK%2B%2BG%2Bw48edZPK%2BM6hLJpQg484enXIdQFSS1u6UhksENEQAAIfkECQoAAAAsAAAAACAAIAAABOcQyEmpGKLqzWcZRVUQnZYg1aBSh2GUVEIQ2aQOE%2BG%2BcD4ntpWkZQj1JIiZIogDFFyHI0UxQwFugMSOFIPJftfVAEoZLBbcLEFhlQiqGp1Vd140AUklUN3eCA51C1EWMzMCezCBBmkxVIVHBWd3HHl9JQOIJSdSnJ0TDKChCwUJjoWMPaGqDKannasMo6WnM562R5YluZRwur0wpgqZE7NKUm%2BFNRPIhjBJxKZteWuIBMN4zRMIVIhffcgojwCF117i4nlLnY5ztRLsnOk%2BaV%2BoJY7V7m76PdkS4trKcdg0Zc0tTcKkRAAAIfkECQoAAAAsAAAAACAAIAAABO4QyEkpKqjqzScpRaVkXZWQEximw1BSCUEIlDohrft6cpKCk5xid5MNJTaAIkekKGQkWyKHkvhKsR7ARmitkAYDYRIbUQRQjWBwJRzChi9CRlBcY1UN4g0%2FVNB0AlcvcAYHRyZPdEQFYV8ccwR5HWxEJ02YmRMLnJ1xCYp0Y5idpQuhopmmC2KgojKasUQDk5BNAwwMOh2RtRq5uQuPZKGIJQIGwAwGf6I0JXMpC8C7kXWDBINFMxS4DKMAWVWAGYsAdNqW5uaRxkSKJOZKaU3tPOBZ4DuK2LATgJhkPJMgTwKCdFjyPHEnKxFCDhEAACH5BAkKAAAALAAAAAAgACAAAATzEMhJaVKp6s2nIkolIJ2WkBShpkVRWqqQrhLSEu9MZJKK9y1ZrqYK9WiClmvoUaF8gIQSNeF1Er4MNFn4SRSDARWroAIETg1iVwuHjYB1kYc1mwruwXKC9gmsJXliGxc%2BXiUCby9ydh1sOSdMkpMTBpaXBzsfhoc5l58Gm5yToAaZhaOUqjkDgCWNHAULCwOLaTmzswadEqggQwgHuQsHIoZCHQMMQgQGubVEcxOPFAcMDAYUA85eWARmfSRQCdcMe0zeP1AAygwLlJtPNAAL19DARdPzBOWSm1brJBi45soRAWQAAkrQIykShQ9wVhHCwCQCACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiRMDjI0Fd30%2FiI2UA5GSS5UDj2l6NoqgOgN4gksEBgYFf0FDqKgHnyZ9OX8HrgYHdHpcHQULXAS2qKpENRg7eAMLC7kTBaixUYFkKAzWAAnLC7FLVxLWDBLKCwaKTULgEwbLA4hJtOkSBNqITT3xEgfLpBtzE%2FjiuL04RGEBgwWhShRgQExHBAAh%2BQQJCgAAACwAAAAAIAAgAAAE7xDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfZiCqGk5dTESJeaOAlClzsJsqwiJwiqnFrb2nS9kmIcgEsjQydLiIlHehhpejaIjzh9eomSjZR%2BipslWIRLAgMDOR2DOqKogTB9pCUJBagDBXR6XB0EBkIIsaRsGGMMAxoDBgYHTKJiUYEGDAzHC9EACcUGkIgFzgwZ0QsSBcXHiQvOwgDdEwfFs0sDzt4S6BK4xYjkDOzn0unFeBzOBijIm1Dgmg5YFQwsCMjp1oJ8LyIAACH5BAkKAAAALAAAAAAgACAAAATwEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GGl6NoiPOH16iZKNlH6KmyWFOggHhEEvAwwMA0N9GBsEC6amhnVcEwavDAazGwIDaH1ipaYLBUTCGgQDA8NdHz0FpqgTBwsLqAbWAAnIA4FWKdMLGdYGEgraigbT0OITBcg5QwPT4xLrROZL6AuQAPUS7bxLpoWidY0JtxLHKhwwMJBTHgPKdEQAACH5BAkKAAAALAAAAAAgACAAAATrEMhJaVKp6s2nIkqFZF2VIBWhUsJaTokqUCoBq%2BE71SRQeyqUToLA7VxF0JDyIQh%2FMVVPMt1ECZlfcjZJ9mIKoaTl1MRIl5o4CUKXOwmyrCInCKqcWtvadL2SYhyASyNDJ0uIiUd6GAULDJCRiXo1CpGXD
JOUjY%2BYip9DhToJA4RBLwMLCwVDfRgbBAaqqoZ1XBMHswsHtxtFaH1iqaoGNgAIxRpbFAgfPQSqpbgGBqUD1wBXeCYp1AYZ19JJOYgH1KwA4UBvQwXUBxPqVD9L3sbp2BNk2xvvFPJd%2BMFCN6HAAIKgNggY0KtEBAAh%2BQQJCgAAACwAAAAAIAAgAAAE6BDISWlSqerNpyJKhWRdlSAVoVLCWk6JKlAqAavhO9UkUHsqlE6CwO1cRdCQ8iEIfzFVTzLdRAmZX3I2SfYIDMaAFdTESJeaEDAIMxYFqrOUaNW4E4ObYcCXaiBVEgULe0NJaxxtYksjh2NLkZISgDgJhHthkpU4mW6blRiYmZOlh4JWkDqILwUGBnE6TYEbCgevr0N1gH4At7gHiRpFaLNrrq8HNgAJA70AWxQIH1%2BvsYMDAzZQPC9VCNkDWUhGkuE5PxJNwiUK4UfLzOlD4WvzAHaoG9nxPi5d%2BjYUqfAhhykOFwJWiAAAIfkECQoAAAAsAAAAACAAIAAABPAQyElpUqnqzaciSoVkXVUMFaFSwlpOCcMYlErAavhOMnNLNo8KsZsMZItJEIDIFSkLGQoQTNhIsFehRww2CQLKF0tYGKYSg%2BygsZIuNqJksKgbfgIGepNo2cIUB3V1B3IvNiBYNQaDSTtfhhx0CwVPI0UJe0%2Bbm4g5VgcGoqOcnjmjqDSdnhgEoamcsZuXO1aWQy8KAwOAuTYYGwi7w5h%2BKr0SJ8MFihpNbx%2B4Erq7BYBuzsdiH1jCAzoSfl0rVirNbRXlBBlLX%2BBP0XJLAPGzTkAuAOqb0WT5AH7OcdCm5B8TgRwSRKIHQtaLCwg1RAAAOwAAAAAAAAAAAA%3D%3D);visibility:visible;opacity:.6;transition:all .3s ease}.reveal>.overlay header{position:absolute;left:0;top:0;width:100%;padding:5px;z-index:2;box-sizing:border-box}.reveal>.overlay header a{display:inline-block;width:40px;height:40px;line-height:36px;padding:0 10px;float:right;opacity:.6;box-sizing:border-box}.reveal>.overlay header a:hover{opacity:1}.reveal>.overlay header a .icon{display:inline-block;width:20px;height:20px;background-position:50% 50%;background-size:100%;background-repeat:no-repeat}.reveal>.overlay header a.close .icon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAABkklEQVRYR8WX4VHDMAxG6wnoJrABZQPYBCaBTWAD2g1gE5gg6OOsXuxIlr40d81dfrSJ9V4c2VLK7spHuTJ/5wpM07QXuXc5X0opX2tEJcadjHuV80li/FgxTIEK/5QBCICBD6xEhSMGHgQPgBgLiYVAB1dpSqKDawxTohFw4JSEA3clzgIBPCURwE2JucBR7rhPJJv5OpJwDX+SfDjgx1wACQeJG1aChP9K/IMmdZ8DtESV1WyP3Bt4MwM6sj4NMxMYiqUWHQu4KYA/SYkIjOsm3BXYWMKFDwU2khjCQ4ELJUJ4SmClRArOCmSXGuKma0fYD5CbzHxFpCSGAhfAVSSUGDUk2BWZaff2g6GE15BsBQ9nwmpIGDiyHQddwNTMKkbZaf9fajXQca1EX44puJZUsnY0ObGmITE3GVLCbEhQUjGVt146j6oasWN+49Vph2w1pZ5EansNZqKBm1txbU57iRRcZ86RWMDdWtBJUHBHwoQPi1GV+JCbntmvok7iTX4/Up9mgyTc/FJYDTcndgH/AA5A/CHsyEkVAAAAAElFTkSuQmCC)}.reveal>.overlay header a.external .icon{background-image:url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAcElEQVRYR+2WSQoAIQwEzf8f7XiOMkUQxUPlGkM3hVmiQfQR9GYnH1SsAQlI4DiBqkCMoNb9y2e90IAEJPAcgdznU9+engMaeJ7Azh5Y1U67gAho4DqBqmB1buAf0MB1AlVBek83ZPkmJMGc1wAR+AAqod/B97TRpQAAAABJRU5ErkJggg==)}.reveal>.overlay .viewport{position:absolute;display:flex;top:50px;right:0;bottom:0;left:0}.reveal>.overlay.overlay-preview .viewport iframe{width:100%;height:100%;max-width:100%;max-height:100%;border:0;opacity:0;visibility:hidden;transition:all .3s ease}.reveal>.overlay.overlay-preview.loaded .viewport iframe{opacity:1;visibility:visible}.reveal>.overlay.overlay-preview.loaded .viewport-inner{position:absolute;z-index:-1;left:0;top:45%;width:100%;text-align:center;letter-spacing:normal}.reveal>.overlay.overlay-preview .x-frame-error{opacity:0;transition:opacity .3s ease .3s}.reveal>.overlay.overlay-preview.loaded .x-frame-error{opacity:1}.reveal>.overlay.overlay-preview.loaded .spinner{opacity:0;visibility:hidden;transform:scale(.2)}.reveal>.overlay.overlay-help .viewport{overflow:auto;color:#fff}.reveal>.overlay.overlay-help .viewport .viewport-inner{width:600px;margin:auto;padding:20px 20px 80px 20px;text-align:center;letter-spacing:normal}.reveal>.overlay.overlay-help .viewport .viewport-inner .title{font-size:20px}.reveal>.overlay.overlay-help .viewport .viewport-inner table{border:1px solid 
#fff;border-collapse:collapse;font-size:16px}.reveal>.overlay.overlay-help .viewport .viewport-inner table td,.reveal>.overlay.overlay-help .viewport .viewport-inner table th{width:200px;padding:14px;border:1px solid #fff;vertical-align:middle}.reveal>.overlay.overlay-help .viewport .viewport-inner table th{padding-top:20px;padding-bottom:20px}.reveal .playback{position:absolute;left:15px;bottom:20px;z-index:30;cursor:pointer;transition:all .4s ease;-webkit-tap-highlight-color:transparent}.reveal.overview .playback{opacity:0;visibility:hidden}.reveal .hljs{min-height:100%}.reveal .hljs table{margin:initial}.reveal .hljs-ln-code,.reveal .hljs-ln-numbers{padding:0;border:0}.reveal .hljs-ln-numbers{opacity:.6;padding-right:.75em;text-align:right;vertical-align:top}.reveal .hljs.has-highlights tr:not(.highlight-line){opacity:.4}.reveal .hljs:not(:first-child).fragment{position:absolute;top:0;left:0;width:100%;box-sizing:border-box}.reveal pre[data-auto-animate-target]{overflow:hidden}.reveal pre[data-auto-animate-target] code{height:100%}.reveal .roll{display:inline-block;line-height:1.2;overflow:hidden;vertical-align:top;perspective:400px;perspective-origin:50% 50%}.reveal .roll:hover{background:0 0;text-shadow:none}.reveal .roll span{display:block;position:relative;padding:0 2px;pointer-events:none;transition:all .4s ease;transform-origin:50% 0;transform-style:preserve-3d;-webkit-backface-visibility:hidden;backface-visibility:hidden}.reveal .roll:hover span{background:rgba(0,0,0,.5);transform:translate3d(0,0,-45px) rotateX(90deg)}.reveal .roll span:after{content:attr(data-title);display:block;position:absolute;left:0;top:0;padding:0 2px;-webkit-backface-visibility:hidden;backface-visibility:hidden;transform-origin:50% 0;transform:translate3d(0,110%,0) rotateX(-90deg)}.reveal aside.notes{display:none}.reveal .speaker-notes{display:none;position:absolute;width:33.3333333333%;height:100%;top:0;left:100%;padding:14px 18px 14px 18px;z-index:1;font-size:18px;line-height:1.4;border:1px solid rgba(0,0,0,.05);color:#222;background-color:#f5f5f5;overflow:auto;box-sizing:border-box;text-align:left;font-family:Helvetica,sans-serif;-webkit-overflow-scrolling:touch}.reveal .speaker-notes .notes-placeholder{color:#ccc;font-style:italic}.reveal .speaker-notes:focus{outline:0}.reveal .speaker-notes:before{content:"Speaker notes";display:block;margin-bottom:10px;opacity:.5}.reveal.show-notes{max-width:75%;overflow:visible}.reveal.show-notes .speaker-notes{display:block}@media screen and (min-width:1600px){.reveal .speaker-notes{font-size:20px}}@media screen and (max-width:1024px){.reveal.show-notes{border-left:0;max-width:none;max-height:70%;max-height:70vh;overflow:visible}.reveal.show-notes .speaker-notes{top:100%;left:0;width:100%;height:30vh;border:0}}@media screen and (max-width:600px){.reveal.show-notes{max-height:60%;max-height:60vh}.reveal.show-notes .speaker-notes{top:100%;height:40vh}.reveal .speaker-notes{font-size:14px}}.zoomed .reveal *,.zoomed .reveal :after,.zoomed .reveal :before{-webkit-backface-visibility:visible!important;backface-visibility:visible!important}.zoomed .reveal .controls,.zoomed .reveal .progress{opacity:0}.zoomed .reveal .roll span{background:0 0}.zoomed .reveal .roll span:after{visibility:hidden}html.print-pdf *{-webkit-print-color-adjust:exact}html.print-pdf{width:100%;height:100%;overflow:visible}html.print-pdf body{margin:0 auto!important;border:0;padding:0;float:none!important;overflow:visible}html.print-pdf .nestedarrow,html.print-pdf .reveal .controls,html.print-pdf 
.reveal .playback,html.print-pdf .reveal .progress,html.print-pdf .reveal.overview,html.print-pdf .state-background{display:none!important}html.print-pdf .reveal pre code{overflow:hidden!important;font-family:Courier,"Courier New",monospace!important}html.print-pdf .reveal{width:auto!important;height:auto!important;overflow:hidden!important}html.print-pdf .reveal .slides{position:static;width:100%!important;height:auto!important;zoom:1!important;pointer-events:initial;left:auto;top:auto;margin:0!important;padding:0!important;overflow:visible;display:block;perspective:none;perspective-origin:50% 50%}html.print-pdf .reveal .slides .pdf-page{position:relative;overflow:hidden;z-index:1;page-break-after:always}html.print-pdf .reveal .slides section{visibility:visible!important;display:block!important;position:absolute!important;margin:0!important;padding:0!important;box-sizing:border-box!important;min-height:1px;opacity:1!important;transform-style:flat!important;transform:none!important}html.print-pdf .reveal section.stack{position:relative!important;margin:0!important;padding:0!important;page-break-after:avoid!important;height:auto!important;min-height:auto!important}html.print-pdf .reveal img{box-shadow:none}html.print-pdf .reveal .backgrounds{display:none}html.print-pdf .reveal .slide-background{display:block!important;position:absolute;top:0;left:0;width:100%;height:100%;z-index:auto!important}html.print-pdf .reveal.show-notes{max-width:none;max-height:none}html.print-pdf .reveal .speaker-notes-pdf{display:block;width:100%;height:auto;max-height:none;top:auto;right:auto;bottom:auto;left:auto;z-index:100}html.print-pdf .reveal .speaker-notes-pdf[data-layout=separate-page]{position:relative;color:inherit;background-color:transparent;padding:20px;page-break-after:always;border:0}html.print-pdf .reveal .slide-number-pdf{display:block;position:absolute;font-size:14px}html.print-pdf .aria-status{display:none}@media print{html:not(.print-pdf){background:#fff;width:auto;height:auto;overflow:visible}html:not(.print-pdf) body{background:#fff;font-size:20pt;width:auto;height:auto;border:0;margin:0 5%;padding:0;overflow:visible;float:none!important}html:not(.print-pdf) .controls,html:not(.print-pdf) .fork-reveal,html:not(.print-pdf) .nestedarrow,html:not(.print-pdf) .reveal .backgrounds,html:not(.print-pdf) .reveal .progress,html:not(.print-pdf) .reveal .slide-number,html:not(.print-pdf) .share-reveal,html:not(.print-pdf) .state-background{display:none!important}html:not(.print-pdf) body,html:not(.print-pdf) li,html:not(.print-pdf) p,html:not(.print-pdf) td{font-size:20pt!important;color:#000}html:not(.print-pdf) h1,html:not(.print-pdf) h2,html:not(.print-pdf) h3,html:not(.print-pdf) h4,html:not(.print-pdf) h5,html:not(.print-pdf) h6{color:#000!important;height:auto;line-height:normal;text-align:left;letter-spacing:normal}html:not(.print-pdf) h1{font-size:28pt!important}html:not(.print-pdf) h2{font-size:24pt!important}html:not(.print-pdf) h3{font-size:22pt!important}html:not(.print-pdf) h4{font-size:22pt!important;font-variant:small-caps}html:not(.print-pdf) h5{font-size:21pt!important}html:not(.print-pdf) h6{font-size:20pt!important;font-style:italic}html:not(.print-pdf) a:link,html:not(.print-pdf) a:visited{color:#000!important;font-weight:700;text-decoration:underline}html:not(.print-pdf) div,html:not(.print-pdf) ol,html:not(.print-pdf) p,html:not(.print-pdf) 
ul{visibility:visible;position:static;width:auto;height:auto;display:block;overflow:visible;margin:0;text-align:left!important}html:not(.print-pdf) .reveal pre,html:not(.print-pdf) .reveal table{margin-left:0;margin-right:0}html:not(.print-pdf) .reveal pre code{padding:20px}html:not(.print-pdf) .reveal blockquote{margin:20px 0}html:not(.print-pdf) .reveal .slides{position:static!important;width:auto!important;height:auto!important;left:0!important;top:0!important;margin-left:0!important;margin-top:0!important;padding:0!important;zoom:1!important;transform:none!important;overflow:visible!important;display:block!important;text-align:left!important;perspective:none;perspective-origin:50% 50%}html:not(.print-pdf) .reveal .slides section{visibility:visible!important;position:static!important;width:auto!important;height:auto!important;display:block!important;overflow:visible!important;left:0!important;top:0!important;margin-left:0!important;margin-top:0!important;padding:60px 20px!important;z-index:auto!important;opacity:1!important;page-break-after:always!important;transform-style:flat!important;transform:none!important;transition:none!important}html:not(.print-pdf) .reveal .slides section.stack{padding:0!important}html:not(.print-pdf) .reveal section:last-of-type{page-break-after:avoid!important}html:not(.print-pdf) .reveal section .fragment{opacity:1!important;visibility:visible!important;transform:none!important}html:not(.print-pdf) .reveal section img{display:block;margin:15px 0;background:#fff;border:1px solid #666;box-shadow:none}html:not(.print-pdf) .reveal section small{font-size:.8em}html:not(.print-pdf) .reveal .hljs{max-height:100%;white-space:pre-wrap;word-wrap:break-word;word-break:break-word;font-size:15pt}html:not(.print-pdf) .reveal .hljs .hljs-ln-numbers{white-space:nowrap}html:not(.print-pdf) .reveal .hljs td{font-size:inherit!important;color:inherit!important}}
\ No newline at end of file
diff --git a/spaces/multimodalart/latentdiffusion/latent-diffusion/ldm/modules/diffusionmodules/__init__.py b/spaces/multimodalart/latentdiffusion/latent-diffusion/ldm/modules/diffusionmodules/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/multimodalart/styleganxlclip/README.md b/spaces/multimodalart/styleganxlclip/README.md
deleted file mode 100644
index 9ae1689f84d2a36c1476efebedab685715d6fce3..0000000000000000000000000000000000000000
--- a/spaces/multimodalart/styleganxlclip/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Styleganxlclip
-emoji: 💩
-colorFrom: yellow
-colorTo: purple
-sdk: gradio
-sdk_version: 3.0.2
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/myscale/object-detection-safari/card_model.py b/spaces/myscale/object-detection-safari/card_model.py
deleted file mode 100644
index b9cb02c88f01f8040108d330fc54bf7525528594..0000000000000000000000000000000000000000
--- a/spaces/myscale/object-detection-safari/card_model.py
+++ /dev/null
@@ -1,100 +0,0 @@
-import base64
-from box_utils import cxywh2ltrb, cxywh2xywh
-
-
-def style():
- """ Style string for card models
- """
- return """
-
-
- """
-
-
-def convert_img_url(img_url):
- return img_url.replace('http://images.cocodataset.org', 'https://s3.us-east-1.amazonaws.com/images.cocodataset.org')
-
-def card(img_url, img_w, img_h, boxes):
- """ This is a hack to streamlit
- Solution thanks to: https://discuss.streamlit.io/t/display-svg/172/5
- Converting SVG to Base64 and display with tag.
- Also we used the
- """
- _boxes = ""
- img_url = convert_img_url(img_url)
- for b in boxes:
- _id, cx, cy, w, h, label, logit, is_selected = b[:8]
- x, y, w, h = cxywh2xywh(cx, cy, w, h)
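- # cxywh2xywh converts the centre-based box to top-left x/y; the lines below scale it to pixels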
- x = round(img_w * x)
- y = round(img_h * y)
- w = round(img_w * w)
- h = round(img_h * h)
- logit = "%.3f" % logit
- _boxes += f'''
- {label}: {logit}
-
- '''
- _svg = f'''
-
- '''
- _svg = r'' % \
- base64.b64encode(_svg.encode('utf-8')).decode('utf-8')
- _img_d = f'''
-
-
- {_svg}
-
- '''
- return _img_d
-
-
-def obj_card(img_url, img_w, img_h, cx, cy, w, h, *args, dst_len=100):
- """object card for displaying cropped object
-
- Args:
- Retrieved image and object info
-
- Returns:
- _obj_html: html string to display object
- """
- img_url = convert_img_url(img_url)
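- # the box size comes in as a fraction of the image; convert it to pixels and
- # take the longer side so the crop is a square that fully contains the object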
- w = img_w * w
- h = img_h * h
- s = max(w, h)
- x = round(img_w * cx - s / 2)
- y = round(img_h * cy - s / 2)
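- # scale factor that maps the s-pixel square crop onto a dst_len-pixel thumbnail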
- scale = dst_len / s
- _obj_html = f'''
-
-
-
- '''
- return _obj_html
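Since the HTML/SVG template strings inside `card` and `obj_card` are not reproduced in this hunk, here is a minimal, self-contained sketch of the technique the `card` docstring points at: build an SVG with the boxes, Base64-encode it, and let Streamlit show it as an `<img>` layered over the photo inside a relatively positioned `<div>`. The function name `render_boxes`, the box tuple layout, and all styling below are illustrative assumptions, not the original markup.

```python
import base64

import streamlit as st


def render_boxes(img_url: str, img_w: int, img_h: int, boxes):
    """Sketch: overlay labelled boxes on an image via a Base64-encoded SVG <img>."""
    shapes = ""
    for cx, cy, w, h, label in boxes:
        # boxes arrive as normalized centre/width/height; convert to pixel top-left
        x = round(img_w * (cx - w / 2))
        y = round(img_h * (cy - h / 2))
        shapes += (
            f'<rect x="{x}" y="{y}" width="{round(img_w * w)}" height="{round(img_h * h)}" '
            f'fill="none" stroke="red" stroke-width="2"/>'
            f'<text x="{x}" y="{max(y - 4, 12)}" fill="red" font-size="14">{label}</text>'
        )
    svg = f'<svg xmlns="http://www.w3.org/2000/svg" width="{img_w}" height="{img_h}">{shapes}</svg>'
    overlay = base64.b64encode(svg.encode("utf-8")).decode("utf-8")
    html = f'''
    <div style="position:relative;width:{img_w}px;height:{img_h}px">
      <img src="{img_url}" width="{img_w}" height="{img_h}"/>
      <img src="data:image/svg+xml;base64,{overlay}" style="position:absolute;left:0;top:0"/>
    </div>
    '''
    st.markdown(html, unsafe_allow_html=True)
```

In the Space itself the COCO URL would first be rewritten with `convert_img_url` so the image is served from the S3 mirror.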
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Curs Fotografie Digitala.pdf LINK.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Curs Fotografie Digitala.pdf LINK.md
deleted file mode 100644
index 3a2fc0d85d58b9f914d04be5d089a9fd0fdd524d..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Curs Fotografie Digitala.pdf LINK.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
How to Learn Digital Photography with Curs Fotografie Digitala.pdf
-
Digital photography is a fascinating and rewarding hobby that can also become a lucrative career. But how do you learn the basics of digital photography and improve your skills? One way is to download and read Curs Fotografie Digitala.pdf, a free ebook that covers everything you need to know about digital photography.
-
Curs Fotografie Digitala.pdf is a comprehensive course that consists of 15 booklets (30 lessons) that teach you the fundamentals of modern photographic art. You will learn about different types of cameras and lenses, how to work with natural and artificial light sources, how to photograph various subjects and events, how to edit and enhance your images on the computer, and how to share your photos online.
The course is designed for both beginners and professionals, and it offers practical examples, exercises and assignments that help you master the content of the course and deepen your knowledge. The course is written in an accessible language, without technical or scientific jargon, and it answers the questions of all those who are passionate about photography.
-
Some of the topics covered in Curs Fotografie Digitala.pdf are:
-
-
How to choose the right camera and lens for your needs
-
How to use the exposure triangle (aperture, shutter speed and ISO) to control the brightness and depth of field of your photos (a short worked example follows this list)
-
How to use different modes and settings on your camera to achieve creative effects
-
How to compose your photos using the rule of thirds, leading lines, symmetry, balance and other principles
-
How to photograph special events (weddings and parties), reportage, portraits, advertising, architecture and interior, underwater photography, infrared photography and more
-
How to work with image editing software such as Photoshop or Lightroom to crop, resize, adjust colors, contrast, brightness, sharpness and other parameters of your photos
-
How to save your photos in different formats (JPEG, RAW, TIFF etc.) and optimize them for web or print
-
How to upload your photos to online platforms such as Flickr, Instagram, Facebook or your own website
-
-
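As a quick, purely illustrative aside on the exposure-triangle item above (this snippet is not part of the course): equivalent exposures keep the exposure value EV = log2(N² / t) constant, where N is the f-number and t the shutter time in seconds, at a fixed ISO.

```python
from math import log2


def exposure_value(f_number: float, shutter_s: float) -> float:
    # EV = log2(N^2 / t); each full EV step halves or doubles the light hitting the sensor
    return log2(f_number ** 2 / shutter_s)


# Closing the aperture one stop (f/2.8 -> f/4) while doubling the shutter time
# (1/100 s -> 1/50 s) leaves the exposure value, and hence the brightness, unchanged.
print(round(exposure_value(2.8, 1 / 100), 1))  # 9.6
print(round(exposure_value(4.0, 1 / 50), 1))   # 9.6
```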
If you want to learn digital photography in an easy and fun way, download Curs Fotografie Digitala.pdf today and start your journey into the world of beautiful images.
-
-
Benefits of Digital Photography
-
Digital photography has many advantages over traditional film photography. Here are some of the benefits of digital photography that you can enjoy:
-
-
No film is needed: With digital photography, you don't have to buy and load film rolls, which can be expensive and inconvenient. You can also save money and time by avoiding the film developing process, which can take days or weeks and use harmful chemicals that damage the environment[^4^].
-
Instant feedback: With digital photography, you can see the results of your shots immediately on the camera's LCD screen or on your computer. This allows you to check your exposure, focus, composition, and other settings and make adjustments as needed. You can also delete unwanted or bad photos and free up space on your memory card[^1^].
-
Easy editing and sharing: With digital photography, you can edit and enhance your photos using software such as Photoshop or Lightroom. You can crop, resize, adjust colors, contrast, brightness, sharpness, and other parameters of your photos. You can also apply filters, effects, and other creative touches to your photos. You can then save your photos in different formats (JPEG, RAW, TIFF etc.) and optimize them for web or print[^3^]. You can also upload your photos to online platforms such as Flickr, Instagram, Facebook or your own website and share them with your friends, family, or clients[^1^].
-
High resolution and quality: Digital photography offers high resolution and quality images that can capture more details and nuances than film. Today's digital cameras have sensors that range from 12 megapixels to 24 megapixels or more, which means they can produce sharp and clear images that can be enlarged or cropped without losing quality[^2^]. Digital photography also offers high dynamic range (HDR), which means it can capture more variation between light and dark areas of an image than film[^1^].
-
Creative possibilities: Digital photography opens up a world of creative possibilities that are not possible or easy with film. You can use different modes and settings on your camera to achieve various effects, such as long exposure, time-lapse, burst mode, panorama, macro, etc. You can also use different types of cameras and lenses to capture different perspectives and angles, such as drones, smart cameras, 360 cameras, etc. You can also experiment with different genres and styles of photography, such as underwater photography, infrared photography, street photography, etc.[^2^]
-
-
Digital photography is a fun and rewarding hobby that can also become a lucrative career. By downloading and reading Curs Fotografie Digitala.pdf, you can learn the basics of digital photography and improve your skills in an easy and enjoyable way.
- e93f5a0c3f
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dassault Systemes CATIA V5R21 SP3.0 X86 X64 Upgrade __EXCLUSIVE__.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dassault Systemes CATIA V5R21 SP3.0 X86 X64 Upgrade __EXCLUSIVE__.md
deleted file mode 100644
index 0afd2254ff65236e08e77d9aa17233cd70cd9247..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Dassault Systemes CATIA V5R21 SP3.0 X86 X64 Upgrade __EXCLUSIVE__.md
+++ /dev/null
@@ -1,130 +0,0 @@
-
-
Dassault Systemes CATIA V5R21 SP3.0 X86 X64 Upgrade: What You Need to Know
-
CATIA is one of the most widely used software solutions for design and engineering in various industries, such as aerospace, automotive, industrial equipment, energy, consumer goods, etc. It enables users to create, simulate, analyze, and optimize complex products and systems in a collaborative environment. CATIA is part of the Dassault Systemes 3DEXPERIENCE platform, which integrates various applications and services for product lifecycle management (PLM).
-
If you are a CATIA user, you may be wondering why you should upgrade to the latest version of CATIA V5R21 SP3.0, which was released in March 2023. In this article, we will explain what's new in CATIA V5R21 SP3.0, how to upgrade to it, and some tips and tricks for using it. By the end of this article, you will have a better understanding of the benefits of upgrading to CATIA V5R21 SP3.0 and how to make the most of it.
-
Dassault Systemes CATIA V5R21 SP3.0 X86 X64 Upgrade
CATIA V5R21 SP3.0 is the third service pack for CATIA V5R21, which was launched in June 2022. It includes several new features and enhancements that improve the user experience and performance of CATIA. Here are some of the main highlights of CATIA V5R21 SP3.0:
-
Virtual Design Enhancements
-
CATIA V5R21 SP3.0 improves the virtual design capabilities of CATIA by adding or improving the following features:
-
-
Imagine & Shape: This feature allows users to create and modify organic shapes using intuitive sculpting tools. CATIA V5R21 SP3.0 introduces a new Sketch on Surface tool that enables users to sketch curves on any surface and use them as guides for sculpting.
-
Lofting: This feature allows users to create smooth surfaces by sweeping one or more profiles along one or more guides. CATIA V5R21 SP3.0 introduces a new Loft with Centerline option that enables users to define a centerline as a guide for lofting.
-
Generative Shape Design: This feature allows users to create and modify complex shapes using advanced surface modeling tools. CATIA V5R21 SP3.0 introduces a new Blend Corner tool that enables users to create smooth transitions between surfaces with different curvatures.
-
-
Analysis and Simulation Enhancements
-
CATIA V5R21 SP3.0 improves the analysis and simulation capabilities of CATIA by adding or improving the following features:
-
-
SIMULIA Extended Analysis: This feature allows users to perform advanced finite element analysis (FEA) on their models using the SIMULIA Abaqus solver. CATIA V5R21 SP3.0 introduces a new Contact Analysis option that enables users to simulate the interaction between contacting parts and account for friction, clearance, and interference.
-
Abaqus for CATIA: This feature allows users to access the full functionality of SIMULIA Abaqus within the CATIA environment. CATIA V5R21 SP3.0 introduces a new Abaqus/Explicit option that enables users to perform dynamic analysis of highly nonlinear problems involving large deformations, impact, and failure.
-
-
Manufacturing and Production Enhancements
-
CATIA V5R21 SP3.0 improves the manufacturing and production capabilities of CATIA by adding or improving the following features:
-
-
DELMIA Shop Order: This feature allows users to create and manage shop orders for manufacturing operations using DELMIA applications. CATIA V5R21 SP3.0 introduces a new Shop Order Editor tool that enables users to edit shop orders directly in CATIA without switching to DELMIA.
-
Intercim MES integration: This feature allows users to integrate CATIA with Intercim Manufacturing Execution System (MES), which is a software solution for managing production processes and quality control. CATIA V5R21 SP3.0 introduces a new MES Connector tool that enables users to exchange data between CATIA and Intercim MES seamlessly.
-
-
Collaboration and Data Management Enhancements
-
CATIA V5R21 SP3.0 improves the collaboration and data management capabilities of CATIA by adding or improving the following features:
-
-
V6 compatibility: This feature allows users to access and work on V6 data using CATIA V5R21 SP3.0. Users can open, edit, save, and synchronize V6 data in CATIA without converting or exporting it. This enables a smooth transition from V5 to V6 and a better collaboration between V5 and V6 users.
-
ENOVIA SmarTeam Express: This feature allows users to manage their CATIA data using ENOVIA SmarTeam, which is a PLM solution for small and medium-sized enterprises. CATIA V5R21 SP3.0 introduces a new SmarTeam Express option that enables users to access ENOVIA SmarTeam directly from CATIA without installing any additional software.
-
-
How to Upgrade to CATIA V5R21 SP3.0?
-
If you are interested in upgrading to CATIA V5R21 SP3.0, you need to follow some steps and requirements to ensure a successful upgrade. Here are the main steps and requirements for upgrading to CATIA V5R21 SP3.0:
-
-
System Requirements
-
Before you upgrade to CATIA V5R21 SP3.0, you need to make sure that your system meets the minimum and recommended requirements for running CATIA V5R21 SP3.0 on Windows or Linux platforms. Here are the system requirements for CATIA V5R21 SP3.0:
-
-
| Operating System | Minimum Requirements | Recommended Requirements |
| --- | --- | --- |
| Windows 7/8/10 (64-bit) | Intel Core i3 or equivalent processor, 4 GB of RAM, 10 GB of free disk space, OpenGL 3.2 compatible graphics card with 512 MB of VRAM | Intel Core i7 or equivalent processor, 8 GB of RAM or more, 20 GB of free disk space or more, OpenGL 4.5 compatible graphics card with 2 GB of VRAM or more |
| Linux (64-bit) | Intel Core i3 or equivalent processor, 4 GB of RAM, 10 GB of free disk space, OpenGL 3.2 compatible graphics card with 512 MB of VRAM | Intel Core i7 or equivalent processor, 8 GB of RAM or more, 20 GB of free disk space or more, OpenGL 4.5 compatible graphics card with 2 GB of VRAM or more |
-
Download and Installation
-
To upgrade to CATIA V5R21 SP3.0, you need to download the installation files from Dassault Systemes website and install them on your machine. Here are the steps for downloading and installing CATIA V5R21 SP3.0:
Navigate to the Downloads section and select CATIA V5R21 SP3.0.
-
Select the appropriate operating system (Windows or Linux) and download the installation files.
-
Extract the installation files to a folder on your machine.
-
Run the setup.exe file (for Windows) or the setup.sh file (for Linux) as an administrator.
-
Follow the instructions on the screen to complete the installation process.
-
Restart your machine after the installation is finished.
-
-
License Activation and Configuration
-
To use CATIA V5R21 SP3.0, you need to activate and configure your licenses for CATIA using DS License Server or other methods. Here are the steps for activating and configuring licenses for CATIA V5R21 SP3.0:
-
-
If you have not installed DS License Server yet, download it from Dassault Systemes website and install it on your machine.
-
If you have not received your license keys yet, contact Dassault Systemes support team and request them.
-
-If you have received your license keys, copy them to the Licenses folder in the DS License Server installation directory.
-
Run the DS License Administration Tool from the DS License Server installation directory.
-
Select License Enabler File and browse to the license keys you copied.
-
Select Install Licenses and wait for the confirmation message.
-
Select Configure Licenses and choose the appropriate license configuration for your CATIA usage.
-
Select Apply Configuration and wait for the confirmation message.
-
Close the DS License Administration Tool and restart your machine.
-
-
Data Backup and Migration
-
To avoid losing or corrupting your data when upgrading to CATIA V5R21 SP3.0, you need to backup and migrate your data from previous versions of CATIA to CATIA V5R21 SP3.0 using tools such as ENOVIA Data Upward Compatibility or other methods. Here are the steps for backing up and migrating data for CATIA V5R21 SP3.0:
-
-
If you have not installed ENOVIA Data Upward Compatibility yet, download it from Dassault Systemes website and install it on your machine.
-
If you have not backed up your data yet, use a backup tool or method of your choice to backup your data from previous versions of CATIA.
-
If you have backed up your data, run the ENOVIA Data Upward Compatibility Tool from the ENOVIA Data Upward Compatibility installation directory.
-
Select Migrate Data and browse to the backup folder of your data.
-
Select V5R21 SP3.0 as the target version and choose the appropriate migration options for your data.
-
Select Start Migration and wait for the migration process to complete.
-
Select View Report and check for any errors or warnings in the migration report.
-
Close the ENOVIA Data Upward Compatibility Tool and verify that your data is migrated successfully to CATIA V5R21 SP3.0.
-
-
Tips and Tricks for Using CATIA V5R21 SP3.0
-
CATIA V5R21 SP3.0 is a powerful and versatile software solution for design and engineering, but it can also be challenging and complex to use. To help you use CATIA V5R21 SP3.0 more efficiently and effectively, here are some useful tips and tricks for using CATIA V5R21 SP3.0:
-
Customizing Settings and Preferences
-
CATIA V5R21 SP3.0 allows you to customize various settings and preferences for your CATIA environment, such as display, interface, tools, options, etc. To customize settings and preferences for CATIA V5R21 SP3.0, you can use tools such as Options Management or other methods. Here are some steps for customizing settings and preferences for CATIA V5R21 SP3.0:
-
-
Run CATIA V5R21 SP3.0 and select Tools > Options from the menu bar.
-
Select the category of settings or preferences you want to customize from the left panel, such as General > Display > Navigation.
-
Select the subcategory of settings or preferences you want to customize from the right panel, such as Pan Mode.
-
Select or modify the value of settings or preferences you want to customize, such as Pan with Middle Mouse Button.
-
Select OK to apply the changes and close the Options dialog box.
-
If you want to save your customized settings or preferences as a profile, select Edit > Profiles > Save As... from the menu bar and enter a name for your profile.
-
If you want to load a previously saved profile of settings or preferences, select Edit > Profiles > Load... from the menu bar and select a profile from the list.
-
-
Using Shortcuts and Commands
-
CATIA V5R21 SP3.0 provides various shortcuts and commands that can save time and effort for users when performing common tasks or operations in CATIA. To use shortcuts and commands in CATIA V5R21 SP3.0, you can use keyboard keys, mouse buttons, or command line inputs. Here is a table of some common shortcuts and commands for CATIA V5R21 SP3.0:

| Shortcut or Command | Description |
| --- | --- |
| F1 | Opens the online help for CATIA. |
| F2 | Toggles the specification tree on or off. |
| F3 | Toggles the compass on or off. |
| F4 | Toggles the standard view toolbar on or off. |
| F5 | Refreshes the display of the current document. |
| Ctrl + N | Creates a new document. |
| Ctrl + O | Opens an existing document. |
| Ctrl + S | Saves the current document. |
| Ctrl + Z | Undoes the last action. |
| Ctrl + Y | Redoes the last action. |
| Ctrl + C | Copies the selected object to the clipboard. |
| Ctrl + V | Pastes the object from the clipboard to the current document. |
| Ctrl + X | Cuts the selected object to the clipboard. |
| Ctrl + A | Selects all objects in the current document. |
| Ctrl + F | Opens the search dialog box for finding objects in the current document. |
| Ctrl + G | Opens the geometry analysis dialog box for checking the quality of geometry in the current document. |
| Ctrl + H | Opens the hide/show dialog box for hiding or showing objects in the current document. |
| Ctrl + M | Opens the measure dialog box for measuring distances, angles, areas, volumes, etc. in the current document. |
| Ctrl + P | Opens the print dialog box for printing the current document. |
| Ctrl + Q | Quits CATIA. |
| Esc | Cancels the current operation or command. |
| Spacebar | Repeats the last command or operation. |
| Enter | Validates the current command or operation. |
| Tab | Switches between different modes or options in a command or dialog box. |
| Shift + Click | Selects multiple objects in a single selection set. |
| Alt + Click | Selects an object behind another object in a transparent mode. |
Troubleshooting Issues and Errors
-
CATIA V5R21 SP3.0 may encounter some issues and errors that may affect its functionality or performance. To troubleshoot issues and errors in CATIA V5R21 SP3.0, you can use tools such as Knowledge Base or other methods. Here is a list of some common issues and errors in CATIA V5R21 SP3.0 and how to solve them:
-
-
License error: This error occurs when CATIA cannot find or access a valid license for running CATIA V5R21 SP3.0. To solve this error, you need to check your license keys, license server, network connection, firewall settings, etc.
-
Installation error: This error occurs when CATIA cannot install or update CATIA V5R21 SP3.0 properly on your machine. To solve this error, you need to check your system requirements, installation files, disk space, permissions, antivirus software, etc.
-
Data error: This error occurs when CATIA cannot open or save data from previous versions of CATIA or other formats in CATIA V5R21 SP3.0. To solve this error, you need to check your data compatibility, data integrity, data backup, data migration, etc.
-
Performance error: This error occurs when CATIA runs slowly or crashes frequently in CATIA V5R21 SP3.0. To solve this error, you need to check your system resources, memory usage, graphics settings, cache settings, etc.
-
Functionality error: This error occurs when CATIA does not work as expected or produces incorrect results in CATIA V5R21 SP3.0. To solve this error, you need to check your settings, preferences, options, commands, parameters, etc.
-
-
If you cannot solve an issue or error by yourself, you can contact Dassault Systemes support team for further assistance.
-
Conclusion
-
CATIA V5R21 SP3.0 is a powerful and versatile software solution for design and engineering that offers many new features and enhancements that improve the user experience and performance of CATIA.
-
In this article, we have explained what's new in CATIA V5R21 SP3.0, how to upgrade to it, and some tips and tricks for using it.
-
We hope that this article has helped you understand the benefits of upgrading to CATIA V5R21 SP3.0 and how to make the most of it.
-
-If you are ready to upgrade to CATIA V5R21 SP3.0 or want to learn more about it, you can visit Dassault Systemes website and contact their support team for more information.
-
FAQs
-
Here are some frequently asked questions related to the topic of this article:
-
-
What is the difference between CATIA V5 and CATIA V6? CATIA V5 and CATIA V6 are two different versions of CATIA that have different architectures, interfaces, and functionalities. CATIA V5 is a standalone software that runs on Windows or Linux platforms, while CATIA V6 is a web-based software that runs on the 3DEXPERIENCE platform. CATIA V5 and CATIA V6 can work together using the V6 compatibility feature in CATIA V5R21 SP3.0.
-
How much does CATIA V5R21 SP3.0 cost? The cost of CATIA V5R21 SP3.0 depends on various factors, such as the type and number of licenses, the duration and mode of subscription, the level of support and maintenance, etc. You can contact Dassault Systemes sales team for a quote based on your specific needs.
-
How long does it take to upgrade to CATIA V5R21 SP3.0? The time it takes to upgrade to CATIA V5R21 SP3.0 depends on various factors, such as the size and complexity of your data, the speed and reliability of your network connection, the availability and performance of your system resources, etc. You can estimate the time it takes to upgrade to CATIA V5R21 SP3.0 by using the Upgrade Estimator tool in ENOVIA Data Upward Compatibility.
-
Is CATIA V5R21 SP3.0 compatible with other software solutions? CATIA V5R21 SP3.0 is compatible with various software solutions that are part of the Dassault Systemes 3DEXPERIENCE platform, such as ENOVIA, DELMIA, SIMULIA, etc. It is also compatible with some third-party software solutions that support standard file formats, such as STEP, IGES, DXF, etc.
-
Where can I find more resources and tutorials for CATIA V5R21 SP3.0? You can find more resources and tutorials for CATIA V5R21 SP3.0 on Dassault Systemes website, such as CATIA User Community, CATIA Learning Center, CATIA Documentation, etc. You can also find some online courses and videos on platforms such as Udemy, YouTube, Coursera, etc.
- b2dd77e56b
-
-
\ No newline at end of file
diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win X64.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win X64.md
deleted file mode 100644
index c22dce4350e4be5dfb722a45c9fadae397c2a23c..0000000000000000000000000000000000000000
--- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win X64.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win x64: A Review
-
E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win x64 is a software package that combines the power and flexibility of VUE and PlantFactory, two of the most advanced tools for creating realistic and stunning 3D environments. Whether you are a professional artist, a hobbyist, or a student, this package offers you everything you need to create breathtaking scenes with ease and efficiency.
VUE is a 3D scenery generator that allows you to design and animate natural landscapes, from planets to terrains, with a high level of detail and realism. VUE features an intuitive interface that is optimized for productivity and creativity, as well as a rich set of rendering features, including volumetric effects, glowing materials, lens flares, stars and planets, rocks, and more. VUE also supports stereo and VR rendering, as well as non-photorealistic rendering for stylized artwork.
-
PlantFactory is a 3D plant modeling and animation software that lets you create any kind of vegetation, from simple grass to complex trees, with unprecedented control and realism. PlantFactory uses a node-based system that allows you to easily adjust any aspect of your plants, such as shape, color, texture, wind, season, age, etc. PlantFactory also integrates seamlessly with VUE, so you can populate your scenes with thousands of plants in seconds.
-
E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win x64 is the latest version of the software package that was released in December 2019. It includes several improvements and new features, such as:
-
-
A new business model and product portfolio that offers three solutions for different artist profiles: Creator (for hobbyists and students), Professional (for freelancers and small studios), and Enterprise (for large studios and production houses).
-
A new licensing system that allows you to activate your software on two computers simultaneously.
-
A new user interface that is more modern and user-friendly.
-
A new render engine that is faster and more efficient.
-
A new terrain modeling technology that uses heightfield terrains for more flexibility and realism.
-
A new plant editing technology that uses subdivision surfaces for more detail and smoothness.
-
A new plant library that includes over 400 presets of plants, trees, flowers, grasses, etc.
-
A new content exchange platform that allows you to share your creations with other users.
-
-
If you are looking for a software package that can help you create stunning 3D environments with ease and efficiency, E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win x64 is a great choice. You can download it from the official website[^1^] or from other sources[^2^] [^3^] [^4^].
-
E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win x64 is not only a powerful software package, but also a fun and easy one to use. You can start creating your own 3D environments in minutes, thanks to the intuitive interface and the drag-and-drop workflow. You can also use the built-in tutorials and the online documentation to learn the basics and the advanced features of the software.
-
-
One of the most impressive aspects of E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win x64 is the quality and realism of the renders. You can achieve photorealistic results with minimal effort, thanks to the accurate lighting, shading, and atmospheric effects. You can also customize every aspect of your renders, such as resolution, quality, antialiasing, depth of field, motion blur, etc. You can even render your scenes in stereo or VR formats for immersive experiences.
-
E-On Vue R4 Build 4003044 Plant Factory Extra 2019 Win x64 is also a versatile software package that can be used for various purposes and projects. You can use it for personal or professional work, such as illustration, animation, game design, architecture, education, etc. You can also export your scenes and plants to other 3D applications, such as 3ds Max, Maya, Cinema 4D, Blender, etc. You can also import models and textures from other sources and use them in your scenes.
81aa517590
-
-
\ No newline at end of file
diff --git a/spaces/nicole1214/text_generator/README.md b/spaces/nicole1214/text_generator/README.md
deleted file mode 100644
index e336ca9a0d08e342402ccaa2544fee94832fc58e..0000000000000000000000000000000000000000
--- a/spaces/nicole1214/text_generator/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Text Generator
-emoji: 📉
-colorFrom: pink
-colorTo: gray
-sdk: gradio
-sdk_version: 3.12.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/nielsr/vilt-vqa/README.md b/spaces/nielsr/vilt-vqa/README.md
deleted file mode 100644
index ad08bc013f6597f99b3d419c96b825f013f7f175..0000000000000000000000000000000000000000
--- a/spaces/nielsr/vilt-vqa/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Vilt Vqa
-emoji: 🌍
-colorFrom: indigo
-colorTo: gray
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version` : _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/nightelf/codesandbox/Dockerfile b/spaces/nightelf/codesandbox/Dockerfile
deleted file mode 100644
index a905ef711861706570e25829b42e8f567c0e4d40..0000000000000000000000000000000000000000
--- a/spaces/nightelf/codesandbox/Dockerfile
+++ /dev/null
@@ -1,13 +0,0 @@
-FROM node:slim
-
-WORKDIR /app
-
-COPY . .
-
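-# Hugging Face Spaces routes incoming traffic to port 7860 by default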
-EXPOSE 7860
-
-RUN apt-get update && \
- chmod 775 server index.js package.json start.sh /app &&\
- npm install
-
-CMD ["node", "index.js"]
diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/evaluation/coco_evaluation.py b/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/evaluation/coco_evaluation.py
deleted file mode 100644
index fe8142cda29613ce1cf78523e422bf598128f590..0000000000000000000000000000000000000000
--- a/spaces/nikitaPDL2023/assignment4/detectron2/detectron2/evaluation/coco_evaluation.py
+++ /dev/null
@@ -1,722 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import contextlib
-import copy
-import io
-import itertools
-import json
-import logging
-import numpy as np
-import os
-import pickle
-from collections import OrderedDict
-import pycocotools.mask as mask_util
-import torch
-from pycocotools.coco import COCO
-from pycocotools.cocoeval import COCOeval
-from tabulate import tabulate
-
-import detectron2.utils.comm as comm
-from detectron2.config import CfgNode
-from detectron2.data import MetadataCatalog
-from detectron2.data.datasets.coco import convert_to_coco_json
-from detectron2.structures import Boxes, BoxMode, pairwise_iou
-from detectron2.utils.file_io import PathManager
-from detectron2.utils.logger import create_small_table
-
-from .evaluator import DatasetEvaluator
-
-try:
- from detectron2.evaluation.fast_eval_api import COCOeval_opt
-except ImportError:
- COCOeval_opt = COCOeval
-
-
-class COCOEvaluator(DatasetEvaluator):
- """
- Evaluate AR for object proposals, AP for instance detection/segmentation, AP
- for keypoint detection outputs using COCO's metrics.
- See http://cocodataset.org/#detection-eval and
- http://cocodataset.org/#keypoints-eval to understand its metrics.
- The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
- the metric cannot be computed (e.g. due to no predictions made).
-
- In addition to COCO, this evaluator is able to support any bounding box detection,
- instance segmentation, or keypoint detection dataset.
- """
-
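- # Typical usage, as an illustrative sketch (not part of the original file); `cfg` and
- # `model` are assumed to be an existing detectron2 config and trained model:
- #
- #   from detectron2.data import build_detection_test_loader
- #   from detectron2.evaluation import COCOEvaluator, inference_on_dataset
- #
- #   evaluator = COCOEvaluator("coco_2017_val", output_dir="./output")
- #   val_loader = build_detection_test_loader(cfg, "coco_2017_val")
- #   print(inference_on_dataset(model, val_loader, evaluator))
-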
- def __init__(
- self,
- dataset_name,
- tasks=None,
- distributed=True,
- output_dir=None,
- *,
- max_dets_per_image=None,
- use_fast_impl=True,
- kpt_oks_sigmas=(),
- allow_cached_coco=True,
- ):
- """
- Args:
- dataset_name (str): name of the dataset to be evaluated.
- It must have either the following corresponding metadata:
-
- "json_file": the path to the COCO format annotation
-
- Or it must be in detectron2's standard dataset format
- so it can be converted to COCO format automatically.
- tasks (tuple[str]): tasks that can be evaluated under the given
- configuration. A task is one of "bbox", "segm", "keypoints".
- By default, will infer this automatically from predictions.
- distributed (True): if True, will collect results from all ranks and run evaluation
- in the main process.
- Otherwise, will only evaluate the results in the current process.
- output_dir (str): optional, an output directory to dump all
- results predicted on the dataset. The dump contains two files:
-
- 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
- contains all the results in the format they are produced by the model.
- 2. "coco_instances_results.json" a json file in COCO's result format.
- max_dets_per_image (int): limit on the maximum number of detections per image.
- By default in COCO, this limit is to 100, but this can be customized
- to be greater, as is needed in evaluation metrics AP fixed and AP pool
- (see https://arxiv.org/pdf/2102.01066.pdf)
- This doesn't affect keypoint evaluation.
- use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
- Although the results should be very close to the official implementation in COCO
- API, it is still recommended to compute results with the official API for use in
- papers. The faster implementation also uses more RAM.
- kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
- See http://cocodataset.org/#keypoints-eval
- When empty, it will use the defaults in COCO.
- Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
- allow_cached_coco (bool): Whether to use cached coco json from previous validation
- runs. You should set this to False if you need to use different validation data.
- Defaults to True.
- """
- self._logger = logging.getLogger(__name__)
- self._distributed = distributed
- self._output_dir = output_dir
-
- if use_fast_impl and (COCOeval_opt is COCOeval):
- self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
- use_fast_impl = False
- self._use_fast_impl = use_fast_impl
-
- # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
- # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
- # 3rd element (100) is used as the limit on the number of detections per image when
- # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
- # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
- if max_dets_per_image is None:
- max_dets_per_image = [1, 10, 100]
- else:
- max_dets_per_image = [1, 10, max_dets_per_image]
- self._max_dets_per_image = max_dets_per_image
-
- if tasks is not None and isinstance(tasks, CfgNode):
- kpt_oks_sigmas = (
- tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
- )
- self._logger.warn(
- "COCO Evaluator instantiated using config, this is deprecated behavior."
- " Please pass in explicit arguments instead."
- )
- self._tasks = None # Inferring it from predictions should be better
- else:
- self._tasks = tasks
-
- self._cpu_device = torch.device("cpu")
-
- self._metadata = MetadataCatalog.get(dataset_name)
- if not hasattr(self._metadata, "json_file"):
- if output_dir is None:
- raise ValueError(
- "output_dir must be provided to COCOEvaluator "
- "for datasets not in COCO format."
- )
- self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")
-
- cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
- self._metadata.json_file = cache_path
- convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco)
-
- json_file = PathManager.get_local_path(self._metadata.json_file)
- with contextlib.redirect_stdout(io.StringIO()):
- self._coco_api = COCO(json_file)
-
- # Test set json files do not contain annotations (evaluation must be
- # performed using the COCO evaluation server).
- self._do_evaluation = "annotations" in self._coco_api.dataset
- if self._do_evaluation:
- self._kpt_oks_sigmas = kpt_oks_sigmas
-
- def reset(self):
- self._predictions = []
-
- def process(self, inputs, outputs):
- """
- Args:
- inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
- It is a list of dict. Each dict corresponds to an image and
- contains keys like "height", "width", "file_name", "image_id".
- outputs: the outputs of a COCO model. It is a list of dicts with key
- "instances" that contains :class:`Instances`.
- """
- for input, output in zip(inputs, outputs):
- prediction = {"image_id": input["image_id"]}
-
- if "instances" in output:
- instances = output["instances"].to(self._cpu_device)
- prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
- if "proposals" in output:
- prediction["proposals"] = output["proposals"].to(self._cpu_device)
- if len(prediction) > 1:
- self._predictions.append(prediction)
-
- def evaluate(self, img_ids=None):
- """
- Args:
- img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
- """
- if self._distributed:
- comm.synchronize()
- predictions = comm.gather(self._predictions, dst=0)
- predictions = list(itertools.chain(*predictions))
-
- if not comm.is_main_process():
- return {}
- else:
- predictions = self._predictions
-
- if len(predictions) == 0:
- self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
- return {}
-
- if self._output_dir:
- PathManager.mkdirs(self._output_dir)
- file_path = os.path.join(self._output_dir, "instances_predictions.pth")
- with PathManager.open(file_path, "wb") as f:
- torch.save(predictions, f)
-
- self._results = OrderedDict()
- if "proposals" in predictions[0]:
- self._eval_box_proposals(predictions)
- if "instances" in predictions[0]:
- self._eval_predictions(predictions, img_ids=img_ids)
- # Copy so the caller can do whatever with results
- return copy.deepcopy(self._results)
-
- def _tasks_from_predictions(self, predictions):
- """
- Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
- """
- tasks = {"bbox"}
- for pred in predictions:
- if "segmentation" in pred:
- tasks.add("segm")
- if "keypoints" in pred:
- tasks.add("keypoints")
- return sorted(tasks)
-
- def _eval_predictions(self, predictions, img_ids=None):
- """
- Evaluate predictions. Fill self._results with the metrics of the tasks.
- """
- self._logger.info("Preparing results for COCO format ...")
- coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
- tasks = self._tasks or self._tasks_from_predictions(coco_results)
-
- # unmap the category ids for COCO
- if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
- dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
- all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
- num_classes = len(all_contiguous_ids)
- assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
-
- reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
- for result in coco_results:
- category_id = result["category_id"]
- assert category_id < num_classes, (
- f"A prediction has class={category_id}, "
- f"but the dataset only has {num_classes} classes and "
- f"predicted class id should be in [0, {num_classes - 1}]."
- )
- result["category_id"] = reverse_id_mapping[category_id]
-
- if self._output_dir:
- file_path = os.path.join(self._output_dir, "coco_instances_results.json")
- self._logger.info("Saving results to {}".format(file_path))
- with PathManager.open(file_path, "w") as f:
- f.write(json.dumps(coco_results))
- f.flush()
-
- if not self._do_evaluation:
- self._logger.info("Annotations are not available for evaluation.")
- return
-
- self._logger.info(
- "Evaluating predictions with {} COCO API...".format(
- "unofficial" if self._use_fast_impl else "official"
- )
- )
- for task in sorted(tasks):
- assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
- coco_eval = (
- _evaluate_predictions_on_coco(
- self._coco_api,
- coco_results,
- task,
- kpt_oks_sigmas=self._kpt_oks_sigmas,
- cocoeval_fn=COCOeval_opt if self._use_fast_impl else COCOeval,
- img_ids=img_ids,
- max_dets_per_image=self._max_dets_per_image,
- )
- if len(coco_results) > 0
- else None # cocoapi does not handle empty results very well
- )
-
- res = self._derive_coco_results(
- coco_eval, task, class_names=self._metadata.get("thing_classes")
- )
- self._results[task] = res
-
- def _eval_box_proposals(self, predictions):
- """
- Evaluate the box proposals in predictions.
- Fill self._results with the metrics for "box_proposals" task.
- """
- if self._output_dir:
- # Saving generated box proposals to file.
- # Predicted box_proposals are in XYXY_ABS mode.
- bbox_mode = BoxMode.XYXY_ABS.value
- ids, boxes, objectness_logits = [], [], []
- for prediction in predictions:
- ids.append(prediction["image_id"])
- boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
- objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
-
- proposal_data = {
- "boxes": boxes,
- "objectness_logits": objectness_logits,
- "ids": ids,
- "bbox_mode": bbox_mode,
- }
- with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
- pickle.dump(proposal_data, f)
-
- if not self._do_evaluation:
- self._logger.info("Annotations are not available for evaluation.")
- return
-
- self._logger.info("Evaluating bbox proposals ...")
- res = {}
- areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
- for limit in [100, 1000]:
- for area, suffix in areas.items():
- stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
- key = "AR{}@{:d}".format(suffix, limit)
- res[key] = float(stats["ar"].item() * 100)
- self._logger.info("Proposal metrics: \n" + create_small_table(res))
- self._results["box_proposals"] = res
-
- def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
- """
- Derive the desired score numbers from summarized COCOeval.
-
- Args:
- coco_eval (None or COCOEval): None represents no predictions from model.
- iou_type (str):
- class_names (None or list[str]): if provided, will use it to predict
- per-category AP.
-
- Returns:
- a dict of {metric name: score}
- """
-
- metrics = {
- "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
- "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
- "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
- }[iou_type]
-
- if coco_eval is None:
- self._logger.warn("No predictions from the model!")
- return {metric: float("nan") for metric in metrics}
-
- # the standard metrics
- results = {
- metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
- for idx, metric in enumerate(metrics)
- }
- self._logger.info(
- "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
- )
- if not np.isfinite(sum(results.values())):
- self._logger.info("Some metrics cannot be computed and is shown as NaN.")
-
- if class_names is None or len(class_names) <= 1:
- return results
- # Compute per-category AP
- # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
- precisions = coco_eval.eval["precision"]
- # precision has dims (iou, recall, cls, area range, max dets)
- assert len(class_names) == precisions.shape[2]
-
- results_per_category = []
- for idx, name in enumerate(class_names):
- # area range index 0: all area ranges
- # max dets index -1: typically 100 per image
- precision = precisions[:, :, idx, 0, -1]
- precision = precision[precision > -1]
- ap = np.mean(precision) if precision.size else float("nan")
- results_per_category.append(("{}".format(name), float(ap * 100)))
-
- # tabulate it
- N_COLS = min(6, len(results_per_category) * 2)
- results_flatten = list(itertools.chain(*results_per_category))
- results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
- table = tabulate(
- results_2d,
- tablefmt="pipe",
- floatfmt=".3f",
- headers=["category", "AP"] * (N_COLS // 2),
- numalign="left",
- )
- self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
-
- results.update({"AP-" + name: ap for name, ap in results_per_category})
- return results
-
-
-def instances_to_coco_json(instances, img_id):
- """
- Dump an "Instances" object to a COCO-format json that's used for evaluation.
-
- Args:
- instances (Instances):
- img_id (int): the image id
-
- Returns:
- list[dict]: list of json annotations in COCO format.
- """
- num_instance = len(instances)
- if num_instance == 0:
- return []
-
- boxes = instances.pred_boxes.tensor.numpy()
- boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
- boxes = boxes.tolist()
- scores = instances.scores.tolist()
- classes = instances.pred_classes.tolist()
-
- has_mask = instances.has("pred_masks")
- if has_mask:
- # use RLE to encode the masks, because they are too large and takes memory
- # since this evaluator stores outputs of the entire dataset
- rles = [
- mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
- for mask in instances.pred_masks
- ]
- for rle in rles:
- # "counts" is an array encoded by mask_util as a byte-stream. Python3's
- # json writer which always produces strings cannot serialize a bytestream
- # unless you decode it. Thankfully, utf-8 works out (which is also what
- # the pycocotools/_mask.pyx does).
- rle["counts"] = rle["counts"].decode("utf-8")
-
- has_keypoints = instances.has("pred_keypoints")
- if has_keypoints:
- keypoints = instances.pred_keypoints
-
- results = []
- for k in range(num_instance):
- result = {
- "image_id": img_id,
- "category_id": classes[k],
- "bbox": boxes[k],
- "score": scores[k],
- }
- if has_mask:
- result["segmentation"] = rles[k]
- if has_keypoints:
- # In COCO annotations,
- # keypoints coordinates are pixel indices.
- # However our predictions are floating point coordinates.
- # Therefore we subtract 0.5 to be consistent with the annotation format.
- # This is the inverse of data loading logic in `datasets/coco.py`.
- keypoints[k][:, :2] -= 0.5
- result["keypoints"] = keypoints[k].flatten().tolist()
- results.append(result)
- return results
-
-
-# inspired from Detectron:
-# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
-def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
- """
- Evaluate detection proposal recall metrics. This function is a much
- faster alternative to the official COCO API recall evaluation code. However,
- it produces slightly different results.
- """
- # Record max overlap value for each gt box
- # Return vector of overlap values
- areas = {
- "all": 0,
- "small": 1,
- "medium": 2,
- "large": 3,
- "96-128": 4,
- "128-256": 5,
- "256-512": 6,
- "512-inf": 7,
- }
- area_ranges = [
- [0**2, 1e5**2], # all
- [0**2, 32**2], # small
- [32**2, 96**2], # medium
- [96**2, 1e5**2], # large
- [96**2, 128**2], # 96-128
- [128**2, 256**2], # 128-256
- [256**2, 512**2], # 256-512
- [512**2, 1e5**2],
- ] # 512-inf
- assert area in areas, "Unknown area range: {}".format(area)
- area_range = area_ranges[areas[area]]
- gt_overlaps = []
- num_pos = 0
-
- for prediction_dict in dataset_predictions:
- predictions = prediction_dict["proposals"]
-
- # sort predictions in descending order
- # TODO maybe remove this and make it explicit in the documentation
- inds = predictions.objectness_logits.sort(descending=True)[1]
- predictions = predictions[inds]
-
- ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
- anno = coco_api.loadAnns(ann_ids)
- gt_boxes = [
- BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
- for obj in anno
- if obj["iscrowd"] == 0
- ]
- gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
- gt_boxes = Boxes(gt_boxes)
- gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
-
- if len(gt_boxes) == 0 or len(predictions) == 0:
- continue
-
- valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
- gt_boxes = gt_boxes[valid_gt_inds]
-
- num_pos += len(gt_boxes)
-
- if len(gt_boxes) == 0:
- continue
-
- if limit is not None and len(predictions) > limit:
- predictions = predictions[:limit]
-
- overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
-
- _gt_overlaps = torch.zeros(len(gt_boxes))
- for j in range(min(len(predictions), len(gt_boxes))):
- # find which proposal box maximally covers each gt box
- # and get the iou amount of coverage for each gt box
- max_overlaps, argmax_overlaps = overlaps.max(dim=0)
-
- # find which gt box is 'best' covered (i.e. 'best' = most iou)
- gt_ovr, gt_ind = max_overlaps.max(dim=0)
- assert gt_ovr >= 0
- # find the proposal box that covers the best covered gt box
- box_ind = argmax_overlaps[gt_ind]
- # record the iou coverage of this gt box
- _gt_overlaps[j] = overlaps[box_ind, gt_ind]
- assert _gt_overlaps[j] == gt_ovr
- # mark the proposal box and the gt box as used
- overlaps[box_ind, :] = -1
- overlaps[:, gt_ind] = -1
-
- # append recorded iou coverage level
- gt_overlaps.append(_gt_overlaps)
- gt_overlaps = (
- torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
- )
- gt_overlaps, _ = torch.sort(gt_overlaps)
-
- if thresholds is None:
- step = 0.05
- thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
- recalls = torch.zeros_like(thresholds)
- # compute recall for each iou threshold
- for i, t in enumerate(thresholds):
- recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
- # ar = 2 * np.trapz(recalls, thresholds)
- ar = recalls.mean()
- return {
- "ar": ar,
- "recalls": recalls,
- "thresholds": thresholds,
- "gt_overlaps": gt_overlaps,
- "num_pos": num_pos,
- }
-
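For intuition, the recall computation at the end of _evaluate_box_proposals reduces to counting how many ground-truth boxes were covered above each IoU threshold. A minimal sketch with made-up overlap values:

import torch

# Hypothetical best-IoU coverage recorded for five ground-truth boxes.
gt_overlaps = torch.tensor([0.92, 0.40, 0.75, 0.55, 0.61])
num_pos = len(gt_overlaps)

thresholds = torch.arange(0.5, 0.95 + 1e-5, 0.05, dtype=torch.float32)
recalls = torch.zeros_like(thresholds)
for i, t in enumerate(thresholds):
    recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)

print(recalls.tolist())
print(float(recalls.mean()))  # average recall over the IoU thresholds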
-
-def _evaluate_predictions_on_coco(
- coco_gt,
- coco_results,
- iou_type,
- kpt_oks_sigmas=None,
- cocoeval_fn=COCOeval_opt,
- img_ids=None,
- max_dets_per_image=None,
-):
- """
- Evaluate the coco results using COCOEval API.
- """
- assert len(coco_results) > 0
-
- if iou_type == "segm":
- coco_results = copy.deepcopy(coco_results)
- # When evaluating mask AP, if the results contain bbox, cocoapi will
- # use the box area as the area of the instance, instead of the mask area.
- # This leads to a different definition of small/medium/large.
- # We remove the bbox field to let mask AP use mask area.
- for c in coco_results:
- c.pop("bbox", None)
-
- coco_dt = coco_gt.loadRes(coco_results)
- coco_eval = cocoeval_fn(coco_gt, coco_dt, iou_type)
- # For COCO, the default max_dets_per_image is [1, 10, 100].
- if max_dets_per_image is None:
- max_dets_per_image = [1, 10, 100] # Default from COCOEval
- else:
- assert (
- len(max_dets_per_image) >= 3
- ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3"
- # In the case that user supplies a custom input for max_dets_per_image,
- # apply COCOevalMaxDets to evaluate AP with the custom input.
- if max_dets_per_image[2] != 100:
- coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type)
- if iou_type != "keypoints":
- coco_eval.params.maxDets = max_dets_per_image
-
- if img_ids is not None:
- coco_eval.params.imgIds = img_ids
-
- if iou_type == "keypoints":
- # Use the COCO default keypoint OKS sigmas unless overrides are specified
- if kpt_oks_sigmas:
- assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
- coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
- # COCOAPI requires every detection and every gt to have keypoints, so
- # we just take the first entry from both
- num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
- num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
- num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
- assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
- f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
- f"Ground truth contains {num_keypoints_gt} keypoints. "
- f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
- "They have to agree with each other. For meaning of OKS, please refer to "
- "http://cocodataset.org/#keypoints-eval."
- )
-
- coco_eval.evaluate()
- coco_eval.accumulate()
- coco_eval.summarize()
-
- return coco_eval
-
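As a rough usage sketch of the evaluate/accumulate/summarize sequence wrapped above, using plain pycocotools; the file names are placeholders, not real fixtures.

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO("instances_val.json")          # hypothetical ground-truth annotations
coco_dt = coco_gt.loadRes("detections.json")  # hypothetical COCO-format results

coco_eval = COCOeval(coco_gt, coco_dt, iouType="bbox")
coco_eval.params.maxDets = [1, 10, 100]       # the COCO defaults
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
print(coco_eval.stats[0])                     # AP @ IoU=0.50:0.95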
-
-class COCOevalMaxDets(COCOeval):
- """
- Modified version of COCOeval for evaluating AP with a custom
- maxDets (by default for COCO, maxDets is 100)
- """
-
- def summarize(self):
- """
- Compute and display summary metrics for evaluation results given
- a custom value for max_dets_per_image
- """
-
- def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
- p = self.params
- iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
- titleStr = "Average Precision" if ap == 1 else "Average Recall"
- typeStr = "(AP)" if ap == 1 else "(AR)"
- iouStr = (
- "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
- if iouThr is None
- else "{:0.2f}".format(iouThr)
- )
-
- aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
- mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
- if ap == 1:
- # dimension of precision: [TxRxKxAxM]
- s = self.eval["precision"]
- # IoU
- if iouThr is not None:
- t = np.where(iouThr == p.iouThrs)[0]
- s = s[t]
- s = s[:, :, :, aind, mind]
- else:
- # dimension of recall: [TxKxAxM]
- s = self.eval["recall"]
- if iouThr is not None:
- t = np.where(iouThr == p.iouThrs)[0]
- s = s[t]
- s = s[:, :, aind, mind]
- if len(s[s > -1]) == 0:
- mean_s = -1
- else:
- mean_s = np.mean(s[s > -1])
- print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
- return mean_s
-
- def _summarizeDets():
- stats = np.zeros((12,))
- # Evaluate AP using the custom limit on maximum detections per image
- stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
- stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
- stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
- stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
- stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
- stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
- stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
- stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
- stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
- stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
- stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
- stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
- return stats
-
- def _summarizeKps():
- stats = np.zeros((10,))
- stats[0] = _summarize(1, maxDets=20)
- stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
- stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
- stats[3] = _summarize(1, maxDets=20, areaRng="medium")
- stats[4] = _summarize(1, maxDets=20, areaRng="large")
- stats[5] = _summarize(0, maxDets=20)
- stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
- stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
- stats[8] = _summarize(0, maxDets=20, areaRng="medium")
- stats[9] = _summarize(0, maxDets=20, areaRng="large")
- return stats
-
- if not self.eval:
- raise Exception("Please run accumulate() first")
- iouType = self.params.iouType
- if iouType == "segm" or iouType == "bbox":
- summarize = _summarizeDets
- elif iouType == "keypoints":
- summarize = _summarizeKps
- self.stats = summarize()
-
- def __str__(self):
- self.summarize()
diff --git a/spaces/oguzakif/video-object-remover/FGT_codes/RAFT/raft.py b/spaces/oguzakif/video-object-remover/FGT_codes/RAFT/raft.py
deleted file mode 100644
index 81b0ee51fde2065165499c9496661ca1f771949d..0000000000000000000000000000000000000000
--- a/spaces/oguzakif/video-object-remover/FGT_codes/RAFT/raft.py
+++ /dev/null
@@ -1,147 +0,0 @@
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .update import BasicUpdateBlock, SmallUpdateBlock
-from .extractor import BasicEncoder, SmallEncoder
-from .corr import CorrBlock, AlternateCorrBlock
-from .utils.utils import bilinear_sampler, coords_grid, upflow8
-
-try:
- autocast = torch.cuda.amp.autocast
-except AttributeError:
- # dummy autocast for PyTorch < 1.6
- class autocast:
- def __init__(self, enabled):
- pass
- def __enter__(self):
- pass
- def __exit__(self, *args):
- pass
-
-
-class RAFT(nn.Module):
- def __init__(self, args):
- super(RAFT, self).__init__()
- self.args = args
-
- if args.small:
- self.hidden_dim = hdim = 96
- self.context_dim = cdim = 64
- args.corr_levels = 4
- args.corr_radius = 3
-
- else:
- self.hidden_dim = hdim = 128
- self.context_dim = cdim = 128
- args.corr_levels = 4
- args.corr_radius = 4
-
-        # The upstream defaults are hard-coded here instead of being probed
-        # from args._get_kwargs() as in the original RAFT implementation.
-        args.dropout = 0
-        args.alternate_corr = False
-
- # feature network, context network, and update block
- if args.small:
- self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
- self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
- self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
-
- else:
- self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
- self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
- self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)
-
-
- def freeze_bn(self):
- for m in self.modules():
- if isinstance(m, nn.BatchNorm2d):
- m.eval()
-
- def initialize_flow(self, img):
- """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
- N, C, H, W = img.shape
- coords0 = coords_grid(N, H//8, W//8).to(img.device)
- coords1 = coords_grid(N, H//8, W//8).to(img.device)
-
- # optical flow computed as difference: flow = coords1 - coords0
- return coords0, coords1
-
- def upsample_flow(self, flow, mask):
- """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
- N, _, H, W = flow.shape
- mask = mask.view(N, 1, 9, 8, 8, H, W)
- mask = torch.softmax(mask, dim=2)
-
- up_flow = F.unfold(8 * flow, [3,3], padding=1)
- up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
-
- up_flow = torch.sum(mask * up_flow, dim=2)
- up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
- return up_flow.reshape(N, 2, 8*H, 8*W)
-
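A quick shape check of the convex upsampling above, using random tensors in place of network outputs (no trained weights involved; the sizes are made up):

import torch
import torch.nn.functional as F

N, H, W = 1, 6, 8
flow = torch.randn(N, 2, H, W)
mask = torch.randn(N, 9 * 8 * 8, H, W)   # stand-in for the predicted up_mask

mask = torch.softmax(mask.view(N, 1, 9, 8, 8, H, W), dim=2)
up_flow = F.unfold(8 * flow, [3, 3], padding=1).view(N, 2, 9, 1, 1, H, W)
up_flow = torch.sum(mask * up_flow, dim=2)
up_flow = up_flow.permute(0, 1, 4, 2, 5, 3).reshape(N, 2, 8 * H, 8 * W)
print(up_flow.shape)  # torch.Size([1, 2, 48, 64])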
-
- def forward(self, image1, image2, iters=12, flow_init=None, upsample=True, test_mode=False):
- """ Estimate optical flow between pair of frames """
-
- image1 = 2 * (image1 / 255.0) - 1.0
- image2 = 2 * (image2 / 255.0) - 1.0
-
- image1 = image1.contiguous()
- image2 = image2.contiguous()
-
- hdim = self.hidden_dim
- cdim = self.context_dim
-
- # run the feature network
- with autocast(enabled=self.args.mixed_precision):
- fmap1, fmap2 = self.fnet([image1, image2])
-
- fmap1 = fmap1.float()
- fmap2 = fmap2.float()
- if self.args.alternate_corr:
-            corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
- else:
- corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
-
- # run the context network
- with autocast(enabled=self.args.mixed_precision):
- cnet = self.cnet(image1)
- net, inp = torch.split(cnet, [hdim, cdim], dim=1)
- net = torch.tanh(net)
- inp = torch.relu(inp)
-
- coords0, coords1 = self.initialize_flow(image1)
-
- if flow_init is not None:
- coords1 = coords1 + flow_init
-
- flow_predictions = []
- for itr in range(iters):
- coords1 = coords1.detach()
- corr = corr_fn(coords1) # index correlation volume
-
- flow = coords1 - coords0
- with autocast(enabled=self.args.mixed_precision):
- net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)
-
- # F(t+1) = F(t) + \Delta(t)
- coords1 = coords1 + delta_flow
-
- # upsample predictions
- if up_mask is None:
- flow_up = upflow8(coords1 - coords0)
- else:
- flow_up = self.upsample_flow(coords1 - coords0, up_mask)
-
- flow_predictions.append(flow_up)
-
- if test_mode:
- return coords1 - coords0, flow_up
-
- return flow_predictions
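The refinement loop boils down to a recurrence on the coordinate grid. A toy sketch with a random stand-in for the learned update block (assumes a recent PyTorch for meshgrid's indexing argument):

import torch

H, W = 6, 8
ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
coords0 = torch.stack([xs, ys]).float()[None]     # (1, 2, H, W) base grid
coords1 = coords0.clone()

for _ in range(12):
    delta_flow = 0.1 * torch.randn_like(coords1)  # stand-in for the update block
    coords1 = coords1 + delta_flow                # F(t+1) = F(t) + Delta(t)

flow = coords1 - coords0                          # final flow estimate at 1/8 resolution
print(flow.shape)                                 # torch.Size([1, 2, 6, 8])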
diff --git a/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_base/run.sh b/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_base/run.sh
deleted file mode 100644
index 3319df91a84a824aae6ebd9da7d9f4dd4c51d248..0000000000000000000000000000000000000000
--- a/spaces/oguzakif/video-object-remover/SiamMask/experiments/siammask_base/run.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-ROOT=`git rev-parse --show-toplevel`
-export PYTHONPATH=$ROOT:$PYTHONPATH
-
-mkdir -p logs
-
-python -u $ROOT/tools/train_siammask.py \
- --config=config.json -b 64 \
-    -j 20 --resume snapshot/checkpoint_e9.pth \
- --epochs 20 \
- --log logs/log.txt \
- 2>&1 | tee logs/train.log
-
-bash test_all.sh -s 1 -e 20 -d VOT2018 -g 4
diff --git a/spaces/oguzakif/video-object-remover/SiamMask/utils/pysot/datasets/dataset.py b/spaces/oguzakif/video-object-remover/SiamMask/utils/pysot/datasets/dataset.py
deleted file mode 100644
index 7847e486b36d7fffa34afdb332e22c2fda8a1133..0000000000000000000000000000000000000000
--- a/spaces/oguzakif/video-object-remover/SiamMask/utils/pysot/datasets/dataset.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# --------------------------------------------------------
-# Python Single Object Tracking Evaluation
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Fangyi Zhang
-# @author fangyi.zhang@vipl.ict.ac.cn
-# @project https://github.com/StrangerZhang/pysot-toolkit.git
-# Revised for SiamMask by foolwood
-# --------------------------------------------------------
-class Dataset(object):
- def __init__(self, name, dataset_root):
- self.name = name
- self.dataset_root = dataset_root
- self.videos = None
-
- def __getitem__(self, idx):
- if isinstance(idx, str):
- return self.videos[idx]
- elif isinstance(idx, int):
- return self.videos[sorted(list(self.videos.keys()))[idx]]
-
- def __len__(self):
- return len(self.videos)
-
- def __iter__(self):
- keys = sorted(list(self.videos.keys()))
- for key in keys:
- yield self.videos[key]
-
- def set_tracker(self, path, tracker_names):
- """
- Args:
- path: path to tracker results,
- tracker_names: list of tracker name
- """
- self.tracker_path = path
- self.tracker_names = tracker_names
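A hedged usage sketch: the base class only needs a subclass to populate self.videos with a dict, after which name lookup, sorted-index lookup, and iteration all work. ToyDataset and its contents are made up.

class ToyDataset(Dataset):
    def __init__(self):
        super(ToyDataset, self).__init__("toy", "/tmp/toy")
        self.videos = {"seq_b": ["f1.jpg"], "seq_a": ["f0.jpg", "f1.jpg"]}

ds = ToyDataset()
print(len(ds))        # 2
print(ds["seq_a"])    # lookup by video name
print(ds[0])          # lookup by sorted index -> the "seq_a" entry
for video in ds:      # iteration follows sorted key order
    print(video)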
diff --git a/spaces/oschan77/animalsvision/model.py b/spaces/oschan77/animalsvision/model.py
deleted file mode 100644
index ee1a4a1c54c30d88f04f2c7e54e0deed92a0a7d3..0000000000000000000000000000000000000000
--- a/spaces/oschan77/animalsvision/model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import torchvision
-import torch.nn as nn
-
-def create_vitb16_model(
- num_classes: int,
-):
- vit_weights = torchvision.models.ViT_B_16_Weights.DEFAULT
- vit_model = torchvision.models.vit_b_16(weights=vit_weights)
- vit_transform = vit_weights.transforms()
-
- for param in vit_model.parameters():
- param.requires_grad = False
-
- vit_model.heads = nn.Sequential(
- nn.Linear(in_features=768, out_features=num_classes, bias=True),
- )
-
- return vit_model, vit_transform
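A minimal sketch of how this factory might be used, assuming the torchvision ViT-B/16 weights can be downloaded; the class count and dummy image are made up.

import torch

model, transform = create_vitb16_model(num_classes=3)
model.eval()

dummy = torch.rand(3, 224, 224)          # CHW float image in [0, 1]
batch = transform(dummy).unsqueeze(0)    # the weights' own resize/crop/normalize
with torch.no_grad():
    probs = torch.softmax(model(batch), dim=1)
print(probs.shape)                       # torch.Size([1, 3])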
diff --git a/spaces/pkiage/time_series_decomposition_demo/src/__init__.py b/spaces/pkiage/time_series_decomposition_demo/src/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py
deleted file mode 100644
index 2e50cd7b40ef18e7f7ee56c0f528bf0ef88b167a..0000000000000000000000000000000000000000
--- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/tenacity/_asyncio.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# Copyright 2016 Étienne Bersac
-# Copyright 2016 Julien Danjou
-# Copyright 2016 Joshua Harlow
-# Copyright 2013-2014 Ray Holder
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-import sys
-import typing as t
-from asyncio import sleep
-
-from pip._vendor.tenacity import AttemptManager
-from pip._vendor.tenacity import BaseRetrying
-from pip._vendor.tenacity import DoAttempt
-from pip._vendor.tenacity import DoSleep
-from pip._vendor.tenacity import RetryCallState
-
-WrappedFnReturnT = t.TypeVar("WrappedFnReturnT")
-WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable[..., t.Awaitable[t.Any]])
-
-
-class AsyncRetrying(BaseRetrying):
- sleep: t.Callable[[float], t.Awaitable[t.Any]]
-
- def __init__(self, sleep: t.Callable[[float], t.Awaitable[t.Any]] = sleep, **kwargs: t.Any) -> None:
- super().__init__(**kwargs)
- self.sleep = sleep
-
- async def __call__( # type: ignore[override]
- self, fn: WrappedFn, *args: t.Any, **kwargs: t.Any
- ) -> WrappedFnReturnT:
- self.begin()
-
- retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
- while True:
- do = self.iter(retry_state=retry_state)
- if isinstance(do, DoAttempt):
- try:
- result = await fn(*args, **kwargs)
- except BaseException: # noqa: B902
- retry_state.set_exception(sys.exc_info()) # type: ignore[arg-type]
- else:
- retry_state.set_result(result)
- elif isinstance(do, DoSleep):
- retry_state.prepare_for_next_attempt()
- await self.sleep(do)
- else:
- return do # type: ignore[no-any-return]
-
- def __iter__(self) -> t.Generator[AttemptManager, None, None]:
- raise TypeError("AsyncRetrying object is not iterable")
-
- def __aiter__(self) -> "AsyncRetrying":
- self.begin()
- self._retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
- return self
-
- async def __anext__(self) -> AttemptManager:
- while True:
- do = self.iter(retry_state=self._retry_state)
- if do is None:
- raise StopAsyncIteration
- elif isinstance(do, DoAttempt):
- return AttemptManager(retry_state=self._retry_state)
- elif isinstance(do, DoSleep):
- self._retry_state.prepare_for_next_attempt()
- await self.sleep(do)
- else:
- raise StopAsyncIteration
-
- def wraps(self, fn: WrappedFn) -> WrappedFn:
- fn = super().wraps(fn)
- # Ensure wrapper is recognized as a coroutine function.
-
- @functools.wraps(fn)
- async def async_wrapped(*args: t.Any, **kwargs: t.Any) -> t.Any:
- return await fn(*args, **kwargs)
-
- # Preserve attributes
- async_wrapped.retry = fn.retry # type: ignore[attr-defined]
- async_wrapped.retry_with = fn.retry_with # type: ignore[attr-defined]
-
- return async_wrapped # type: ignore[return-value]
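A hedged usage sketch of the same class via the public tenacity package (which this vendored module mirrors): retry a flaky coroutine until it succeeds. The flaky coroutine and retry policy are made up.

import asyncio

from tenacity import AsyncRetrying, stop_after_attempt, wait_fixed

attempts = {"n": 0}

async def flaky():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise RuntimeError("transient failure")
    return "ok"

async def main():
    retryer = AsyncRetrying(stop=stop_after_attempt(5), wait=wait_fixed(0))
    result = await retryer(flaky)  # __call__ drives the DoAttempt/DoSleep loop
    print(result, attempts["n"])   # ok 3

asyncio.run(main())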
diff --git a/spaces/podsni/YouTube_Summarize_Hades/README.md b/spaces/podsni/YouTube_Summarize_Hades/README.md
deleted file mode 100644
index db0f079531030c0db60f76b3740372619a106d8c..0000000000000000000000000000000000000000
--- a/spaces/podsni/YouTube_Summarize_Hades/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: YouTube Summarize
-emoji: 👀
-colorFrom: blue
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-license: cc
-duplicated_from: LaoCzi/YouTube_Summarize2
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/web_fileresponse.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/web_fileresponse.py
deleted file mode 100644
index f41ed3fd0a9c1e0d5e45ce1e97b99bfef8361cac..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/aiohttp/web_fileresponse.py
+++ /dev/null
@@ -1,288 +0,0 @@
-import asyncio
-import mimetypes
-import os
-import pathlib
-import sys
-from typing import ( # noqa
- IO,
- TYPE_CHECKING,
- Any,
- Awaitable,
- Callable,
- Iterator,
- List,
- Optional,
- Tuple,
- Union,
- cast,
-)
-
-from . import hdrs
-from .abc import AbstractStreamWriter
-from .helpers import ETAG_ANY, ETag
-from .typedefs import Final, LooseHeaders
-from .web_exceptions import (
- HTTPNotModified,
- HTTPPartialContent,
- HTTPPreconditionFailed,
- HTTPRequestRangeNotSatisfiable,
-)
-from .web_response import StreamResponse
-
-__all__ = ("FileResponse",)
-
-if TYPE_CHECKING: # pragma: no cover
- from .web_request import BaseRequest
-
-
-_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]]
-
-
-NOSENDFILE: Final[bool] = bool(os.environ.get("AIOHTTP_NOSENDFILE"))
-
-
-class FileResponse(StreamResponse):
- """A response object can be used to send files."""
-
- def __init__(
- self,
- path: Union[str, pathlib.Path],
- chunk_size: int = 256 * 1024,
- status: int = 200,
- reason: Optional[str] = None,
- headers: Optional[LooseHeaders] = None,
- ) -> None:
- super().__init__(status=status, reason=reason, headers=headers)
-
- if isinstance(path, str):
- path = pathlib.Path(path)
-
- self._path = path
- self._chunk_size = chunk_size
-
- async def _sendfile_fallback(
- self, writer: AbstractStreamWriter, fobj: IO[Any], offset: int, count: int
- ) -> AbstractStreamWriter:
-        # To keep memory usage low, fobj is transferred in chunks
- # controlled by the constructor's chunk_size argument.
-
- chunk_size = self._chunk_size
- loop = asyncio.get_event_loop()
-
- await loop.run_in_executor(None, fobj.seek, offset)
-
- chunk = await loop.run_in_executor(None, fobj.read, chunk_size)
- while chunk:
- await writer.write(chunk)
- count = count - chunk_size
- if count <= 0:
- break
- chunk = await loop.run_in_executor(None, fobj.read, min(chunk_size, count))
-
- await writer.drain()
- return writer
-
- async def _sendfile(
- self, request: "BaseRequest", fobj: IO[Any], offset: int, count: int
- ) -> AbstractStreamWriter:
- writer = await super().prepare(request)
- assert writer is not None
-
- if NOSENDFILE or sys.version_info < (3, 7) or self.compression:
- return await self._sendfile_fallback(writer, fobj, offset, count)
-
- loop = request._loop
- transport = request.transport
- assert transport is not None
-
- try:
- await loop.sendfile(transport, fobj, offset, count)
- except NotImplementedError:
- return await self._sendfile_fallback(writer, fobj, offset, count)
-
- await super().write_eof()
- return writer
-
- @staticmethod
- def _strong_etag_match(etag_value: str, etags: Tuple[ETag, ...]) -> bool:
- if len(etags) == 1 and etags[0].value == ETAG_ANY:
- return True
- return any(etag.value == etag_value for etag in etags if not etag.is_weak)
-
- async def _not_modified(
- self, request: "BaseRequest", etag_value: str, last_modified: float
- ) -> Optional[AbstractStreamWriter]:
- self.set_status(HTTPNotModified.status_code)
- self._length_check = False
- self.etag = etag_value # type: ignore[assignment]
- self.last_modified = last_modified # type: ignore[assignment]
- # Delete any Content-Length headers provided by user. HTTP 304
- # should always have empty response body
- return await super().prepare(request)
-
- async def _precondition_failed(
- self, request: "BaseRequest"
- ) -> Optional[AbstractStreamWriter]:
- self.set_status(HTTPPreconditionFailed.status_code)
- self.content_length = 0
- return await super().prepare(request)
-
- async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]:
- filepath = self._path
-
- gzip = False
- if "gzip" in request.headers.get(hdrs.ACCEPT_ENCODING, ""):
- gzip_path = filepath.with_name(filepath.name + ".gz")
-
- if gzip_path.is_file():
- filepath = gzip_path
- gzip = True
-
- loop = asyncio.get_event_loop()
- st: os.stat_result = await loop.run_in_executor(None, filepath.stat)
-
- etag_value = f"{st.st_mtime_ns:x}-{st.st_size:x}"
- last_modified = st.st_mtime
-
- # https://tools.ietf.org/html/rfc7232#section-6
- ifmatch = request.if_match
- if ifmatch is not None and not self._strong_etag_match(etag_value, ifmatch):
- return await self._precondition_failed(request)
-
- unmodsince = request.if_unmodified_since
- if (
- unmodsince is not None
- and ifmatch is None
- and st.st_mtime > unmodsince.timestamp()
- ):
- return await self._precondition_failed(request)
-
- ifnonematch = request.if_none_match
- if ifnonematch is not None and self._strong_etag_match(etag_value, ifnonematch):
- return await self._not_modified(request, etag_value, last_modified)
-
- modsince = request.if_modified_since
- if (
- modsince is not None
- and ifnonematch is None
- and st.st_mtime <= modsince.timestamp()
- ):
- return await self._not_modified(request, etag_value, last_modified)
-
- if hdrs.CONTENT_TYPE not in self.headers:
- ct, encoding = mimetypes.guess_type(str(filepath))
- if not ct:
- ct = "application/octet-stream"
- should_set_ct = True
- else:
- encoding = "gzip" if gzip else None
- should_set_ct = False
-
- status = self._status
- file_size = st.st_size
- count = file_size
-
- start = None
-
- ifrange = request.if_range
- if ifrange is None or st.st_mtime <= ifrange.timestamp():
- # If-Range header check:
- # condition = cached date >= last modification date
- # return 206 if True else 200.
- # if False:
- # Range header would not be processed, return 200
- # if True but Range header missing
- # return 200
- try:
- rng = request.http_range
- start = rng.start
- end = rng.stop
- except ValueError:
- # https://tools.ietf.org/html/rfc7233:
- # A server generating a 416 (Range Not Satisfiable) response to
- # a byte-range request SHOULD send a Content-Range header field
- # with an unsatisfied-range value.
- # The complete-length in a 416 response indicates the current
- # length of the selected representation.
- #
- # Will do the same below. Many servers ignore this and do not
- # send a Content-Range header with HTTP 416
- self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}"
- self.set_status(HTTPRequestRangeNotSatisfiable.status_code)
- return await super().prepare(request)
-
- # If a range request has been made, convert start, end slice
- # notation into file pointer offset and count
- if start is not None or end is not None:
- if start < 0 and end is None: # return tail of file
- start += file_size
- if start < 0:
- # if Range:bytes=-1000 in request header but file size
- # is only 200, there would be trouble without this
- start = 0
- count = file_size - start
- else:
- # rfc7233:If the last-byte-pos value is
- # absent, or if the value is greater than or equal to
- # the current length of the representation data,
- # the byte range is interpreted as the remainder
- # of the representation (i.e., the server replaces the
- # value of last-byte-pos with a value that is one less than
- # the current length of the selected representation).
- count = (
- min(end if end is not None else file_size, file_size) - start
- )
-
- if start >= file_size:
- # HTTP 416 should be returned in this case.
- #
- # According to https://tools.ietf.org/html/rfc7233:
- # If a valid byte-range-set includes at least one
- # byte-range-spec with a first-byte-pos that is less than
- # the current length of the representation, or at least one
- # suffix-byte-range-spec with a non-zero suffix-length,
- # then the byte-range-set is satisfiable. Otherwise, the
- # byte-range-set is unsatisfiable.
- self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}"
- self.set_status(HTTPRequestRangeNotSatisfiable.status_code)
- return await super().prepare(request)
-
- status = HTTPPartialContent.status_code
- # Even though you are sending the whole file, you should still
- # return a HTTP 206 for a Range request.
- self.set_status(status)
-
- if should_set_ct:
- self.content_type = ct # type: ignore[assignment]
- if encoding:
- self.headers[hdrs.CONTENT_ENCODING] = encoding
- if gzip:
- self.headers[hdrs.VARY] = hdrs.ACCEPT_ENCODING
-
- self.etag = etag_value # type: ignore[assignment]
- self.last_modified = st.st_mtime # type: ignore[assignment]
- self.content_length = count
-
- self.headers[hdrs.ACCEPT_RANGES] = "bytes"
-
- real_start = cast(int, start)
-
- if status == HTTPPartialContent.status_code:
- self.headers[hdrs.CONTENT_RANGE] = "bytes {}-{}/{}".format(
- real_start, real_start + count - 1, file_size
- )
-
- # If we are sending 0 bytes calling sendfile() will throw a ValueError
- if count == 0 or request.method == hdrs.METH_HEAD or self.status in [204, 304]:
- return await super().prepare(request)
-
- fobj = await loop.run_in_executor(None, filepath.open, "rb")
- if start: # be aware that start could be None or int=0 here.
- offset = start
- else:
- offset = 0
-
- try:
- return await self._sendfile(request, fobj, offset, count)
- finally:
- await loop.run_in_executor(None, fobj.close)
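For reference, the start/end-to-(offset, count) conversion in prepare() can be exercised on its own. This small sketch treats end as exclusive, matching rng.stop, and reproduces the suffix-range, bounded-range, and open-ended cases; it is a standalone mirror of the logic, not aiohttp API.

def range_to_offset_count(start, end, file_size):
    # Mirrors the slice-to-(offset, count) logic above, outside aiohttp.
    if start is not None and start < 0 and end is None:  # suffix range: bytes=-N
        start = max(file_size + start, 0)
        return start, file_size - start
    start = start or 0
    end = file_size if end is None else min(end, file_size)
    return start, end - start

print(range_to_offset_count(-1000, None, 200))  # (0, 200): tail larger than the file
print(range_to_offset_count(10, 20, 100))       # (10, 10): bytes 10..19
print(range_to_offset_count(0, None, 50))       # (0, 50): whole file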
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/utils.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/utils.py
deleted file mode 100644
index c7e3e3ddc9352b4dc71806c0ebb78cded0f837a8..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/fsspec/utils.py
+++ /dev/null
@@ -1,629 +0,0 @@
-from __future__ import annotations
-
-import contextlib
-import logging
-import math
-import os
-import pathlib
-import re
-import sys
-import tempfile
-from functools import partial
-from hashlib import md5
-from importlib.metadata import version
-from typing import (
- IO,
- TYPE_CHECKING,
- Any,
- Callable,
- Iterable,
- Iterator,
- Sequence,
- TypeVar,
-)
-from urllib.parse import urlsplit
-
-if TYPE_CHECKING:
- from typing_extensions import TypeGuard
-
- from fsspec.spec import AbstractFileSystem
-
-
-DEFAULT_BLOCK_SIZE = 5 * 2**20
-
-T = TypeVar("T")
-
-
-def infer_storage_options(
- urlpath: str, inherit_storage_options: dict[str, Any] | None = None
-) -> dict[str, Any]:
- """Infer storage options from URL path and merge it with existing storage
- options.
-
- Parameters
- ----------
- urlpath: str or unicode
- Either local absolute file path or URL (hdfs://namenode:8020/file.csv)
- inherit_storage_options: dict (optional)
- Its contents will get merged with the inferred information from the
- given path
-
- Returns
- -------
- Storage options dict.
-
- Examples
- --------
- >>> infer_storage_options('/mnt/datasets/test.csv') # doctest: +SKIP
- {"protocol": "file", "path", "/mnt/datasets/test.csv"}
- >>> infer_storage_options(
- ... 'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',
- ... inherit_storage_options={'extra': 'value'},
- ... ) # doctest: +SKIP
- {"protocol": "hdfs", "username": "username", "password": "pwd",
- "host": "node", "port": 123, "path": "/mnt/datasets/test.csv",
- "url_query": "q=1", "extra": "value"}
- """
- # Handle Windows paths including disk name in this special case
- if (
- re.match(r"^[a-zA-Z]:[\\/]", urlpath)
- or re.match(r"^[a-zA-Z0-9]+://", urlpath) is None
- ):
- return {"protocol": "file", "path": urlpath}
-
- parsed_path = urlsplit(urlpath)
- protocol = parsed_path.scheme or "file"
- if parsed_path.fragment:
- path = "#".join([parsed_path.path, parsed_path.fragment])
- else:
- path = parsed_path.path
- if protocol == "file":
- # Special case parsing file protocol URL on Windows according to:
- # https://msdn.microsoft.com/en-us/library/jj710207.aspx
- windows_path = re.match(r"^/([a-zA-Z])[:|]([\\/].*)$", path)
- if windows_path:
- path = "%s:%s" % windows_path.groups()
-
- if protocol in ["http", "https"]:
- # for HTTP, we don't want to parse, as requests will anyway
- return {"protocol": protocol, "path": urlpath}
-
- options: dict[str, Any] = {"protocol": protocol, "path": path}
-
- if parsed_path.netloc:
- # Parse `hostname` from netloc manually because `parsed_path.hostname`
- # lowercases the hostname which is not always desirable (e.g. in S3):
- # https://github.com/dask/dask/issues/1417
- options["host"] = parsed_path.netloc.rsplit("@", 1)[-1].rsplit(":", 1)[0]
-
- if protocol in ("s3", "s3a", "gcs", "gs"):
- options["path"] = options["host"] + options["path"]
- else:
- options["host"] = options["host"]
- if parsed_path.port:
- options["port"] = parsed_path.port
- if parsed_path.username:
- options["username"] = parsed_path.username
- if parsed_path.password:
- options["password"] = parsed_path.password
-
- if parsed_path.query:
- options["url_query"] = parsed_path.query
- if parsed_path.fragment:
- options["url_fragment"] = parsed_path.fragment
-
- if inherit_storage_options:
- update_storage_options(options, inherit_storage_options)
-
- return options
-
-
-def update_storage_options(
- options: dict[str, Any], inherited: dict[str, Any] | None = None
-) -> None:
- if not inherited:
- inherited = {}
- collisions = set(options) & set(inherited)
- if collisions:
- for collision in collisions:
- if options.get(collision) != inherited.get(collision):
- raise KeyError(
- f"Collision between inferred and specified storage "
- f"option:\n{collision}"
- )
- options.update(inherited)
-
-
-# Compression extensions registered via fsspec.compression.register_compression
-compressions: dict[str, str] = {}
-
-
-def infer_compression(filename: str) -> str | None:
- """Infer compression, if available, from filename.
-
- Infer a named compression type, if registered and available, from filename
- extension. This includes builtin (gz, bz2, zip) compressions, as well as
- optional compressions. See fsspec.compression.register_compression.
- """
- extension = os.path.splitext(filename)[-1].strip(".").lower()
- if extension in compressions:
- return compressions[extension]
- return None
-
-
-def build_name_function(max_int: float) -> Callable[[int], str]:
- """Returns a function that receives a single integer
- and returns it as a string padded by enough zero characters
- to align with maximum possible integer
-
- >>> name_f = build_name_function(57)
-
- >>> name_f(7)
- '07'
- >>> name_f(31)
- '31'
- >>> build_name_function(1000)(42)
- '0042'
- >>> build_name_function(999)(42)
- '042'
- >>> build_name_function(0)(0)
- '0'
- """
- # handle corner cases max_int is 0 or exact power of 10
- max_int += 1e-8
-
- pad_length = int(math.ceil(math.log10(max_int)))
-
- def name_function(i: int) -> str:
- return str(i).zfill(pad_length)
-
- return name_function
-
-
-def seek_delimiter(file: IO[bytes], delimiter: bytes, blocksize: int) -> bool:
- r"""Seek current file to file start, file end, or byte after delimiter seq.
-
- Seeks file to next chunk delimiter, where chunks are defined on file start,
- a delimiting sequence, and file end. Use file.tell() to see location afterwards.
- Note that file start is a valid split, so must be at offset > 0 to seek for
- delimiter.
-
- Parameters
- ----------
- file: a file
- delimiter: bytes
- a delimiter like ``b'\n'`` or message sentinel, matching file .read() type
- blocksize: int
- Number of bytes to read from the file at once.
-
-
- Returns
- -------
- Returns True if a delimiter was found, False if at file start or end.
-
- """
-
- if file.tell() == 0:
- # beginning-of-file, return without seek
- return False
-
- # Interface is for binary IO, with delimiter as bytes, but initialize last
- # with result of file.read to preserve compatibility with text IO.
- last: bytes | None = None
- while True:
- current = file.read(blocksize)
- if not current:
- # end-of-file without delimiter
- return False
- full = last + current if last else current
- try:
- if delimiter in full:
- i = full.index(delimiter)
- file.seek(file.tell() - (len(full) - i) + len(delimiter))
- return True
- elif len(current) < blocksize:
- # end-of-file without delimiter
- return False
- except (OSError, ValueError):
- pass
- last = full[-len(delimiter) :]
-
-
-def read_block(
- f: IO[bytes],
- offset: int,
- length: int | None,
- delimiter: bytes | None = None,
- split_before: bool = False,
-) -> bytes:
- """Read a block of bytes from a file
-
- Parameters
- ----------
- f: File
- Open file
- offset: int
- Byte offset to start read
- length: int
- Number of bytes to read, read through end of file if None
- delimiter: bytes (optional)
- Ensure reading starts and stops at delimiter bytestring
- split_before: bool (optional)
- Start/stop read *before* delimiter bytestring.
-
-
- If using the ``delimiter=`` keyword argument we ensure that the read
- starts and stops at delimiter boundaries that follow the locations
- ``offset`` and ``offset + length``. If ``offset`` is zero then we
- start at zero, regardless of delimiter. The bytestring returned WILL
- include the terminating delimiter string.
-
- Examples
- --------
-
- >>> from io import BytesIO # doctest: +SKIP
- >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
- >>> read_block(f, 0, 13) # doctest: +SKIP
- b'Alice, 100\\nBo'
-
- >>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
- b'Alice, 100\\nBob, 200\\n'
-
- >>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
- b'Bob, 200\\nCharlie, 300'
- """
- if delimiter:
- f.seek(offset)
- found_start_delim = seek_delimiter(f, delimiter, 2**16)
- if length is None:
- return f.read()
- start = f.tell()
- length -= start - offset
-
- f.seek(start + length)
- found_end_delim = seek_delimiter(f, delimiter, 2**16)
- end = f.tell()
-
- # Adjust split location to before delimiter iff seek found the
- # delimiter sequence, not start or end of file.
- if found_start_delim and split_before:
- start -= len(delimiter)
-
- if found_end_delim and split_before:
- end -= len(delimiter)
-
- offset = start
- length = end - start
-
- f.seek(offset)
-
- # TODO: allow length to be None and read to the end of the file?
- assert length is not None
- b = f.read(length)
- return b
-
-
-def tokenize(*args: Any, **kwargs: Any) -> str:
- """Deterministic token
-
- (modified from dask.base)
-
- >>> tokenize([1, 2, '3'])
- '9d71491b50023b06fc76928e6eddb952'
-
- >>> tokenize('Hello') == tokenize('Hello')
- True
- """
- if kwargs:
- args += (kwargs,)
- try:
- h = md5(str(args).encode())
- except ValueError:
- # FIPS systems: https://github.com/fsspec/filesystem_spec/issues/380
- h = md5(str(args).encode(), usedforsecurity=False) # type: ignore[call-arg]
- return h.hexdigest()
-
-
-def stringify_path(filepath: str | os.PathLike[str] | pathlib.Path) -> str:
- """Attempt to convert a path-like object to a string.
-
- Parameters
- ----------
- filepath: object to be converted
-
- Returns
- -------
- filepath_str: maybe a string version of the object
-
- Notes
- -----
- Objects supporting the fspath protocol are coerced according to its
- __fspath__ method.
-
- For backwards compatibility with older Python version, pathlib.Path
- objects are specially coerced.
-
- Any other object is passed through unchanged, which includes bytes,
- strings, buffers, or anything else that's not even path-like.
- """
- if isinstance(filepath, str):
- return filepath
- elif hasattr(filepath, "__fspath__"):
- return filepath.__fspath__()
- elif isinstance(filepath, pathlib.Path):
- return str(filepath)
- elif hasattr(filepath, "path"):
- return filepath.path
- else:
- return filepath # type: ignore[return-value]
-
-
-def make_instance(
- cls: Callable[..., T], args: Sequence[Any], kwargs: dict[str, Any]
-) -> T:
- inst = cls(*args, **kwargs)
- inst._determine_worker() # type: ignore[attr-defined]
- return inst
-
-
-def common_prefix(paths: Iterable[str]) -> str:
- """For a list of paths, find the shortest prefix common to all"""
- parts = [p.split("/") for p in paths]
- lmax = min(len(p) for p in parts)
- end = 0
- for i in range(lmax):
- end = all(p[i] == parts[0][i] for p in parts)
- if not end:
- break
- i += end
- return "/".join(parts[0][:i])
-
-
-def other_paths(
- paths: list[str],
- path2: str | list[str],
- exists: bool = False,
- flatten: bool = False,
-) -> list[str]:
- """In bulk file operations, construct a new file tree from a list of files
-
- Parameters
- ----------
- paths: list of str
- The input file tree
- path2: str or list of str
- Root to construct the new list in. If this is already a list of str, we just
- assert it has the right number of elements.
- exists: bool (optional)
-        For a str destination, if it already exists (and is a dir), files should
-        end up inside it.
- flatten: bool (optional)
- Whether to flatten the input directory tree structure so that the output files
- are in the same directory.
-
- Returns
- -------
- list of str
- """
-
- if isinstance(path2, str):
- path2 = path2.rstrip("/")
-
- if flatten:
- path2 = ["/".join((path2, p.split("/")[-1])) for p in paths]
- else:
- cp = common_prefix(paths)
- if exists:
- cp = cp.rsplit("/", 1)[0]
- if not cp and all(not s.startswith("/") for s in paths):
- path2 = ["/".join([path2, p]) for p in paths]
- else:
- path2 = [p.replace(cp, path2, 1) for p in paths]
- else:
- assert len(paths) == len(path2)
- return path2
-
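A small worked example of common_prefix and other_paths together; the paths and destination root are made up.

paths = ["data/2021/a.csv", "data/2021/b.csv", "data/2022/c.csv"]
print(common_prefix(paths))            # "data"

# Mirror the tree under a new root; the shared "data" prefix is swapped out.
print(other_paths(paths, "s3://bucket/backup"))
# ['s3://bucket/backup/2021/a.csv', 's3://bucket/backup/2021/b.csv', 's3://bucket/backup/2022/c.csv']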
-
-def is_exception(obj: Any) -> bool:
- return isinstance(obj, BaseException)
-
-
-def isfilelike(f: Any) -> TypeGuard[IO[bytes]]:
- for attr in ["read", "close", "tell"]:
- if not hasattr(f, attr):
- return False
- return True
-
-
-def get_protocol(url: str) -> str:
- parts = re.split(r"(\:\:|\://)", url, 1)
- if len(parts) > 1:
- return parts[0]
- return "file"
-
-
-def can_be_local(path: str) -> bool:
- """Can the given URL be used with open_local?"""
- from fsspec import get_filesystem_class
-
- try:
- return getattr(get_filesystem_class(get_protocol(path)), "local_file", False)
- except (ValueError, ImportError):
- # not in registry or import failed
- return False
-
-
-def get_package_version_without_import(name: str) -> str | None:
- """For given package name, try to find the version without importing it
-
- Import and package.__version__ is still the backup here, so an import
- *might* happen.
-
- Returns either the version string, or None if the package
- or the version was not readily found.
- """
- if name in sys.modules:
- mod = sys.modules[name]
- if hasattr(mod, "__version__"):
- return mod.__version__
- try:
- return version(name)
- except: # noqa: E722
- pass
- try:
- import importlib
-
- mod = importlib.import_module(name)
- return mod.__version__
- except (ImportError, AttributeError):
- return None
-
-
-def setup_logging(
- logger: logging.Logger | None = None,
- logger_name: str | None = None,
- level: str = "DEBUG",
- clear: bool = True,
-) -> logging.Logger:
- if logger is None and logger_name is None:
- raise ValueError("Provide either logger object or logger name")
- logger = logger or logging.getLogger(logger_name)
- handle = logging.StreamHandler()
- formatter = logging.Formatter(
- "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s -- %(message)s"
- )
- handle.setFormatter(formatter)
- if clear:
- logger.handlers.clear()
- logger.addHandler(handle)
- logger.setLevel(level)
- return logger
-
-
-def _unstrip_protocol(name: str, fs: AbstractFileSystem) -> str:
- return fs.unstrip_protocol(name)
-
-
-def mirror_from(
- origin_name: str, methods: Iterable[str]
-) -> Callable[[type[T]], type[T]]:
- """Mirror attributes and methods from the given
- origin_name attribute of the instance to the
- decorated class"""
-
- def origin_getter(method: str, self: Any) -> Any:
- origin = getattr(self, origin_name)
- return getattr(origin, method)
-
- def wrapper(cls: type[T]) -> type[T]:
- for method in methods:
- wrapped_method = partial(origin_getter, method)
- setattr(cls, method, property(wrapped_method))
- return cls
-
- return wrapper
-
-
-@contextlib.contextmanager
-def nullcontext(obj: T) -> Iterator[T]:
- yield obj
-
-
-def merge_offset_ranges(
- paths: list[str],
- starts: list[int] | int,
- ends: list[int] | int,
- max_gap: int = 0,
- max_block: int | None = None,
- sort: bool = True,
-) -> tuple[list[str], list[int], list[int]]:
- """Merge adjacent byte-offset ranges when the inter-range
- gap is <= `max_gap`, and when the merged byte range does not
- exceed `max_block` (if specified). By default, this function
- will re-order the input paths and byte ranges to ensure sorted
- order. If the user can guarantee that the inputs are already
- sorted, passing `sort=False` will skip the re-ordering.
- """
- # Check input
- if not isinstance(paths, list):
- raise TypeError
- if not isinstance(starts, list):
- starts = [starts] * len(paths)
- if not isinstance(ends, list):
- ends = [ends] * len(paths)
- if len(starts) != len(paths) or len(ends) != len(paths):
- raise ValueError
-
- # Early Return
- if len(starts) <= 1:
- return paths, starts, ends
-
- starts = [s or 0 for s in starts]
- # Sort by paths and then ranges if `sort=True`
- if sort:
- paths, starts, ends = (
- list(v)
- for v in zip(
- *sorted(
- zip(paths, starts, ends),
- )
- )
- )
-
- if paths:
- # Loop through the coupled `paths`, `starts`, and
- # `ends`, and merge adjacent blocks when appropriate
- new_paths = paths[:1]
- new_starts = starts[:1]
- new_ends = ends[:1]
- for i in range(1, len(paths)):
- if paths[i] == paths[i - 1] and new_ends[-1] is None:
- continue
- elif (
- paths[i] != paths[i - 1]
- or ((starts[i] - new_ends[-1]) > max_gap)
- or (max_block is not None and (ends[i] - new_starts[-1]) > max_block)
- ):
- # Cannot merge with previous block.
- # Add new `paths`, `starts`, and `ends` elements
- new_paths.append(paths[i])
- new_starts.append(starts[i])
- new_ends.append(ends[i])
- else:
- # Merge with previous block by updating the
- # last element of `ends`
- new_ends[-1] = ends[i]
- return new_paths, new_starts, new_ends
-
- # `paths` is empty. Just return input lists
- return paths, starts, ends
-
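A small worked example of the merging rule, with made-up paths and byte ranges: adjacent ranges on the same file merge when the gap is within max_gap and the merged block stays under max_block.

paths  = ["a.parquet", "a.parquet", "a.parquet", "b.parquet"]
starts = [0,           110,         10_000,      0]
ends   = [100,         200,         10_050,      500]

merged = merge_offset_ranges(paths, starts, ends, max_gap=32, max_block=4096)
print(merged)
# (['a.parquet', 'a.parquet', 'b.parquet'], [0, 10000, 0], [200, 10050, 500])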
-
-def file_size(filelike: IO[bytes]) -> int:
- """Find length of any open read-mode file-like"""
- pos = filelike.tell()
- try:
- return filelike.seek(0, 2)
- finally:
- filelike.seek(pos)
-
-
-@contextlib.contextmanager
-def atomic_write(path: str, mode: str = "wb"):
- """
- A context manager that opens a temporary file next to `path` and, on exit,
- replaces `path` with the temporary file, thereby updating `path`
- atomically.
- """
- fd, fn = tempfile.mkstemp(
- dir=os.path.dirname(path), prefix=os.path.basename(path) + "-"
- )
- try:
- with open(fd, mode) as fp:
- yield fp
- except BaseException:
- with contextlib.suppress(FileNotFoundError):
- os.unlink(fn)
- raise
- else:
- os.replace(fn, path)
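A hedged usage sketch (the path is made up): either the write completes and os.replace swaps the file in atomically, or the original file is left untouched.

path = "/tmp/example_config.json"
with atomic_write(path, mode="wb") as fp:
    fp.write(b'{"version": 2}')
# On success the temporary file has replaced `path`; on an exception it is unlinked.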
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/lfs.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/lfs.py
deleted file mode 100644
index c3c890044d470a80e255e7fd80cedf21b2eb2522..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/huggingface_hub/lfs.py
+++ /dev/null
@@ -1,495 +0,0 @@
-# coding=utf-8
-# Copyright 2019-present, the HuggingFace Inc. team.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Git LFS related type definitions and utilities"""
-import io
-import os
-import re
-import warnings
-from contextlib import AbstractContextManager
-from dataclasses import dataclass
-from math import ceil
-from os.path import getsize
-from pathlib import Path
-from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, Optional, Tuple, TypedDict
-
-from requests.auth import HTTPBasicAuth
-
-from huggingface_hub.constants import ENDPOINT, HF_HUB_ENABLE_HF_TRANSFER, REPO_TYPES_URL_PREFIXES
-from huggingface_hub.utils import get_session
-
-from .utils import get_token_to_send, hf_raise_for_status, http_backoff, logging, validate_hf_hub_args
-from .utils.sha import sha256, sha_fileobj
-
-
-if TYPE_CHECKING:
- from ._commit_api import CommitOperationAdd
-
-logger = logging.get_logger(__name__)
-
-OID_REGEX = re.compile(r"^[0-9a-f]{40}$")
-
-LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload"
-
-LFS_HEADERS = {
- "Accept": "application/vnd.git-lfs+json",
- "Content-Type": "application/vnd.git-lfs+json",
-}
-
-
-@dataclass
-class UploadInfo:
- """
- Dataclass holding required information to determine whether a blob
- should be uploaded to the hub using the LFS protocol or the regular protocol
-
- Args:
- sha256 (`bytes`):
- SHA256 hash of the blob
- size (`int`):
- Size in bytes of the blob
- sample (`bytes`):
- First 512 bytes of the blob
- """
-
- sha256: bytes
- size: int
- sample: bytes
-
- @classmethod
- def from_path(cls, path: str):
- size = getsize(path)
- with io.open(path, "rb") as file:
- sample = file.peek(512)[:512]
- sha = sha_fileobj(file)
- return cls(size=size, sha256=sha, sample=sample)
-
- @classmethod
- def from_bytes(cls, data: bytes):
- sha = sha256(data).digest()
- return cls(size=len(data), sample=data[:512], sha256=sha)
-
- @classmethod
- def from_fileobj(cls, fileobj: BinaryIO):
- sample = fileobj.read(512)
- fileobj.seek(0, io.SEEK_SET)
- sha = sha_fileobj(fileobj)
- size = fileobj.tell()
- fileobj.seek(0, io.SEEK_SET)
- return cls(size=size, sha256=sha, sample=sample)
-
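A quick sketch of the constructors above on a tiny in-memory payload; the bytes are made up.

info = UploadInfo.from_bytes(b"hello world")
print(info.size)                # 11
print(info.sample)              # b'hello world' (first 512 bytes)
print(info.sha256.hex()[:16])   # hex prefix of the SHA256 digest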
-
-@validate_hf_hub_args
-def post_lfs_batch_info(
- upload_infos: Iterable[UploadInfo],
- token: Optional[str],
- repo_type: str,
- repo_id: str,
- endpoint: Optional[str] = None,
-) -> Tuple[List[dict], List[dict]]:
- """
- Requests the LFS batch endpoint to retrieve upload instructions
-
- Learn more: https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
-
- Args:
- upload_infos (`Iterable` of `UploadInfo`):
- `UploadInfo` for the files that are being uploaded, typically obtained
- from `CommitOperationAdd.upload_info`
- repo_type (`str`):
- Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
- repo_id (`str`):
- A namespace (user or an organization) and a repo name separated
- by a `/`.
- token (`str`, *optional*):
- An authentication token ( See https://huggingface.co/settings/tokens )
-
- Returns:
- `LfsBatchInfo`: 2-tuple:
- - First element is the list of upload instructions from the server
-            - Second element is a list of errors, if any
-
- Raises:
- `ValueError`: If an argument is invalid or the server response is malformed
-
- `HTTPError`: If the server returned an error
- """
- endpoint = endpoint if endpoint is not None else ENDPOINT
- url_prefix = ""
- if repo_type in REPO_TYPES_URL_PREFIXES:
- url_prefix = REPO_TYPES_URL_PREFIXES[repo_type]
- batch_url = f"{endpoint}/{url_prefix}{repo_id}.git/info/lfs/objects/batch"
- resp = get_session().post(
- batch_url,
- headers=LFS_HEADERS,
- json={
- "operation": "upload",
- "transfers": ["basic", "multipart"],
- "objects": [
- {
- "oid": upload.sha256.hex(),
- "size": upload.size,
- }
- for upload in upload_infos
- ],
- "hash_algo": "sha256",
- },
- auth=HTTPBasicAuth(
- "access_token",
- get_token_to_send(token or True), # type: ignore # Token must be provided or retrieved
- ),
- )
- hf_raise_for_status(resp)
- batch_info = resp.json()
-
- objects = batch_info.get("objects", None)
- if not isinstance(objects, list):
- raise ValueError("Malformed response from server")
-
- return (
- [_validate_batch_actions(obj) for obj in objects if "error" not in obj],
- [_validate_batch_error(obj) for obj in objects if "error" in obj],
- )
-
-
-class PayloadPartT(TypedDict):
- partNumber: int
- etag: str
-
-
-class CompletionPayloadT(TypedDict):
- """Payload that will be sent to the Hub when uploading multi-part."""
-
- oid: str
- parts: List[PayloadPartT]
-
-
-def lfs_upload(operation: "CommitOperationAdd", lfs_batch_action: Dict, token: Optional[str]) -> None:
- """
- Handles uploading a given object to the Hub with the LFS protocol.
-
- Can be a No-op if the content of the file is already present on the hub large file storage.
-
- Args:
- operation (`CommitOperationAdd`):
- The add operation triggering this upload.
- lfs_batch_action (`dict`):
- Upload instructions from the LFS batch endpoint for this object. See [`~utils.lfs.post_lfs_batch_info`] for
- more details.
- token (`str`, *optional*):
- A [user access token](https://hf.co/settings/tokens) to authenticate requests against the Hub
-
- Raises:
- - `ValueError` if `lfs_batch_action` is improperly formatted
- - `HTTPError` if the upload resulted in an error
- """
- # 0. If LFS file is already present, skip upload
- _validate_batch_actions(lfs_batch_action)
- actions = lfs_batch_action.get("actions")
- if actions is None:
- # The file was already uploaded
- logger.debug(f"Content of file {operation.path_in_repo} is already present upstream - skipping upload")
- return
-
- # 1. Validate server response (check required keys in dict)
- upload_action = lfs_batch_action["actions"]["upload"]
- _validate_lfs_action(upload_action)
- verify_action = lfs_batch_action["actions"].get("verify")
- if verify_action is not None:
- _validate_lfs_action(verify_action)
-
- # 2. Upload file (either single part or multi-part)
- header = upload_action.get("header", {})
- chunk_size = header.get("chunk_size")
- if chunk_size is not None:
- try:
- chunk_size = int(chunk_size)
- except (ValueError, TypeError):
- raise ValueError(
- f"Malformed response from LFS batch endpoint: `chunk_size` should be an integer. Got '{chunk_size}'."
- )
- _upload_multi_part(operation=operation, header=header, chunk_size=chunk_size, upload_url=upload_action["href"])
- else:
- _upload_single_part(operation=operation, upload_url=upload_action["href"])
-
- # 3. Verify upload went well
- if verify_action is not None:
- _validate_lfs_action(verify_action)
- verify_resp = get_session().post(
- verify_action["href"],
- auth=HTTPBasicAuth(username="USER", password=get_token_to_send(token or True)), # type: ignore
- json={"oid": operation.upload_info.sha256.hex(), "size": operation.upload_info.size},
- )
- hf_raise_for_status(verify_resp)
- logger.debug(f"{operation.path_in_repo}: Upload successful")
-
-
-def _validate_lfs_action(lfs_action: dict):
- """validates response from the LFS batch endpoint"""
- if not (
- isinstance(lfs_action.get("href"), str)
- and (lfs_action.get("header") is None or isinstance(lfs_action.get("header"), dict))
- ):
- raise ValueError("lfs_action is improperly formatted")
- return lfs_action
-
-
-def _validate_batch_actions(lfs_batch_actions: dict):
- """validates response from the LFS batch endpoint"""
- if not (isinstance(lfs_batch_actions.get("oid"), str) and isinstance(lfs_batch_actions.get("size"), int)):
- raise ValueError("lfs_batch_actions is improperly formatted")
-
- upload_action = lfs_batch_actions.get("actions", {}).get("upload")
- verify_action = lfs_batch_actions.get("actions", {}).get("verify")
- if upload_action is not None:
- _validate_lfs_action(upload_action)
- if verify_action is not None:
- _validate_lfs_action(verify_action)
- return lfs_batch_actions
-
-
-def _validate_batch_error(lfs_batch_error: dict):
- """validates response from the LFS batch endpoint"""
- if not (isinstance(lfs_batch_error.get("oid"), str) and isinstance(lfs_batch_error.get("size"), int)):
- raise ValueError("lfs_batch_error is improperly formatted")
- error_info = lfs_batch_error.get("error")
- if not (
- isinstance(error_info, dict)
- and isinstance(error_info.get("message"), str)
- and isinstance(error_info.get("code"), int)
- ):
- raise ValueError("lfs_batch_error is improperly formatted")
- return lfs_batch_error
-
-
-def _upload_single_part(operation: "CommitOperationAdd", upload_url: str) -> None:
- """
-    Uploads the operation's file content as a single PUT HTTP request (basic LFS transfer protocol)
-
-    Args:
-        operation (`CommitOperationAdd`):
-            The add operation holding the file-like data to upload.
-        upload_url (`str`):
-            The URL to PUT the file to.
-
- Raises: `requests.HTTPError` if the upload resulted in an error
- """
- with operation.as_file(with_tqdm=True) as fileobj:
- response = http_backoff("PUT", upload_url, data=fileobj)
- hf_raise_for_status(response)
-
-
-def _upload_multi_part(operation: "CommitOperationAdd", header: Dict, chunk_size: int, upload_url: str) -> None:
- """
- Uploads file using HF multipart LFS transfer protocol.
- """
- # 1. Get upload URLs for each part
- sorted_parts_urls = _get_sorted_parts_urls(header=header, upload_info=operation.upload_info, chunk_size=chunk_size)
-
- # 2. Upload parts (either with hf_transfer or in pure Python)
- use_hf_transfer = HF_HUB_ENABLE_HF_TRANSFER
- if (
- HF_HUB_ENABLE_HF_TRANSFER
- and not isinstance(operation.path_or_fileobj, str)
- and not isinstance(operation.path_or_fileobj, Path)
- ):
- warnings.warn(
- "hf_transfer is enabled but does not support uploading from bytes or BinaryIO, falling back to regular"
- " upload"
- )
- use_hf_transfer = False
-
- response_headers = (
- _upload_parts_hf_transfer(operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size)
- if use_hf_transfer
- else _upload_parts_iteratively(operation=operation, sorted_parts_urls=sorted_parts_urls, chunk_size=chunk_size)
- )
-
- # 3. Send completion request
- completion_res = get_session().post(
- upload_url,
- json=_get_completion_payload(response_headers, operation.upload_info.sha256.hex()),
- headers=LFS_HEADERS,
- )
- hf_raise_for_status(completion_res)
-
-
-def _get_sorted_parts_urls(header: Dict, upload_info: UploadInfo, chunk_size: int) -> List[str]:
- sorted_part_upload_urls = [
- upload_url
- for _, upload_url in sorted(
- [
- (int(part_num, 10), upload_url)
- for part_num, upload_url in header.items()
-                if part_num.isdigit()  # isdigit() is already False for empty strings
- ],
- key=lambda t: t[0],
- )
- ]
- num_parts = len(sorted_part_upload_urls)
- if num_parts != ceil(upload_info.size / chunk_size):
- raise ValueError("Invalid server response to upload large LFS file")
- return sorted_part_upload_urls
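-
-# Illustrative sketch, not part of the original module: numeric header keys are
-# sorted numerically and non-numeric keys (e.g. "chunk_size") are ignored. The
-# SimpleNamespace stands in for UploadInfo, since only `.size` is read here.
-def _example_sorted_parts_urls():  # pragma: no cover - documentation only
-    from types import SimpleNamespace
-
-    urls = _get_sorted_parts_urls(
-        header={"2": "https://example.com/p2", "1": "https://example.com/p1", "chunk_size": "1048576"},
-        upload_info=SimpleNamespace(size=2 * 1024 * 1024),
-        chunk_size=1024 * 1024,
-    )
-    assert urls == ["https://example.com/p1", "https://example.com/p2"]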
-
-
-def _get_completion_payload(response_headers: List[Dict], oid: str) -> CompletionPayloadT:
- parts: List[PayloadPartT] = []
- for part_number, header in enumerate(response_headers):
- etag = header.get("etag")
- if etag is None or etag == "":
- raise ValueError(f"Invalid etag (`{etag}`) returned for part {part_number + 1}")
- parts.append(
- {
- "partNumber": part_number + 1,
- "etag": etag,
- }
- )
- return {"oid": oid, "parts": parts}
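-
-# Illustrative sketch, not part of the original module: given the ETag headers
-# returned for two uploaded parts, the completion payload has the following
-# shape (all values are placeholders).
-def _example_completion_payload():  # pragma: no cover - documentation only
-    payload = _get_completion_payload([{"etag": '"aaa"'}, {"etag": '"bbb"'}], oid="ff00")
-    assert payload == {
-        "oid": "ff00",
-        "parts": [{"partNumber": 1, "etag": '"aaa"'}, {"partNumber": 2, "etag": '"bbb"'}],
-    }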
-
-
-def _upload_parts_iteratively(
- operation: "CommitOperationAdd", sorted_parts_urls: List[str], chunk_size: int
-) -> List[Dict]:
- headers = []
- with operation.as_file(with_tqdm=True) as fileobj:
- for part_idx, part_upload_url in enumerate(sorted_parts_urls):
- with SliceFileObj(
- fileobj,
- seek_from=chunk_size * part_idx,
- read_limit=chunk_size,
- ) as fileobj_slice:
- part_upload_res = http_backoff("PUT", part_upload_url, data=fileobj_slice)
- hf_raise_for_status(part_upload_res)
- headers.append(part_upload_res.headers)
- return headers # type: ignore
-
-
-def _upload_parts_hf_transfer(
- operation: "CommitOperationAdd", sorted_parts_urls: List[str], chunk_size: int
-) -> List[Dict]:
-    # Upload file using an external Rust-based package. Upload is faster but supports fewer features (no progress bars).
- try:
- from hf_transfer import multipart_upload
- except ImportError:
- raise ValueError(
- "Fast uploading using 'hf_transfer' is enabled (HF_HUB_ENABLE_HF_TRANSFER=1) but 'hf_transfer' package is"
- " not available in your environment. Try `pip install hf_transfer`."
- )
-
- try:
- return multipart_upload(
- file_path=operation.path_or_fileobj,
- parts_urls=sorted_parts_urls,
- chunk_size=chunk_size,
- max_files=128,
- parallel_failures=127, # could be removed
- max_retries=5,
- )
- except Exception as e:
- raise RuntimeError(
- "An error occurred while uploading using `hf_transfer`. Consider disabling HF_HUB_ENABLE_HF_TRANSFER for"
- " better error handling."
- ) from e
-
-
-class SliceFileObj(AbstractContextManager):
- """
- Utility context manager to read a *slice* of a seekable file-like object as a seekable, file-like object.
-
-    This is NOT thread-safe.
-
- Inspired by stackoverflow.com/a/29838711/593036
-
- Credits to @julien-c
-
- Args:
- fileobj (`BinaryIO`):
- A file-like object to slice. MUST implement `tell()` and `seek()` (and `read()` of course).
- `fileobj` will be reset to its original position when exiting the context manager.
- seek_from (`int`):
- The start of the slice (offset from position 0 in bytes).
- read_limit (`int`):
- The maximum number of bytes to read from the slice.
-
- Attributes:
-        previous_position (`int`):
-            The position of `fileobj` when entering the context manager; it is restored on exit.
-
- Examples:
-
-    Reading 200 bytes with an offset of 128 bytes from a file (i.e. bytes 128 to 327):
- ```python
- >>> with open("path/to/file", "rb") as file:
- ... with SliceFileObj(file, seek_from=128, read_limit=200) as fslice:
- ... fslice.read(...)
- ```
-
- Reading a file in chunks of 512 bytes
- ```python
-    >>> import os
-    >>> from math import ceil
-    >>> chunk_size = 512
-    >>> file_size = os.path.getsize("path/to/file")
- >>> with open("path/to/file", "rb") as file:
- ... for chunk_idx in range(ceil(file_size / chunk_size)):
- ... with SliceFileObj(file, seek_from=chunk_idx * chunk_size, read_limit=chunk_size) as fslice:
- ... chunk = fslice.read(...)
-
- ```
- """
-
- def __init__(self, fileobj: BinaryIO, seek_from: int, read_limit: int):
- self.fileobj = fileobj
- self.seek_from = seek_from
- self.read_limit = read_limit
-
- def __enter__(self):
- self._previous_position = self.fileobj.tell()
- end_of_stream = self.fileobj.seek(0, os.SEEK_END)
- self._len = min(self.read_limit, end_of_stream - self.seek_from)
- # ^^ The actual number of bytes that can be read from the slice
- self.fileobj.seek(self.seek_from, io.SEEK_SET)
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.fileobj.seek(self._previous_position, io.SEEK_SET)
-
- def read(self, n: int = -1):
- pos = self.tell()
- if pos >= self._len:
- return b""
- remaining_amount = self._len - pos
- data = self.fileobj.read(remaining_amount if n < 0 else min(n, remaining_amount))
- return data
-
- def tell(self) -> int:
- return self.fileobj.tell() - self.seek_from
-
- def seek(self, offset: int, whence: int = os.SEEK_SET) -> int:
- start = self.seek_from
- end = start + self._len
- if whence in (os.SEEK_SET, os.SEEK_END):
- offset = start + offset if whence == os.SEEK_SET else end + offset
- offset = max(start, min(offset, end))
- whence = os.SEEK_SET
- elif whence == os.SEEK_CUR:
- cur_pos = self.fileobj.tell()
- offset = max(start - cur_pos, min(offset, end - cur_pos))
- else:
- raise ValueError(f"whence value {whence} is not supported")
- return self.fileobj.seek(offset, whence) - self.seek_from
-
- def __iter__(self):
-        # Yield successive 4 MiB chunks until the slice is exhausted.
-        yield from iter(lambda: self.read(n=4 * 1024 * 1024), b"")
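-
-# Illustrative sketch, not part of the original module: slicing an in-memory
-# buffer with SliceFileObj. The slice exposes bytes 2..4 of the buffer, and the
-# underlying file position is restored on exit.
-def _example_slice_fileobj():  # pragma: no cover - documentation only
-    import io
-
-    buf = io.BytesIO(b"abcdefgh")
-    with SliceFileObj(buf, seek_from=2, read_limit=3) as fslice:
-        assert fslice.read() == b"cde"
-    assert buf.tell() == 0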
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/dviread.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/dviread.py
deleted file mode 100644
index b2177e5087bc2e471f1c060d524de176904c808f..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/dviread.py
+++ /dev/null
@@ -1,1149 +0,0 @@
-"""
-A module for reading dvi files output by TeX. Several limitations make
-this not (currently) useful as a general-purpose dvi preprocessor, but
-it is currently used by the pdf backend for processing usetex text.
-
-Interface::
-
- with Dvi(filename, 72) as dvi:
- # iterate over pages:
- for page in dvi:
- w, h, d = page.width, page.height, page.descent
- for x, y, font, glyph, width in page.text:
- fontname = font.texname
- pointsize = font.size
- ...
- for x, y, height, width in page.boxes:
- ...
-"""
-
-from collections import namedtuple
-import enum
-from functools import lru_cache, partial, wraps
-import logging
-import os
-from pathlib import Path
-import re
-import struct
-import subprocess
-import sys
-
-import numpy as np
-
-from matplotlib import _api, cbook
-
-_log = logging.getLogger(__name__)
-
-# Many dvi related files are looked for by external processes, require
-# additional parsing, and are used many times per rendering, which is why they
-# are cached using lru_cache().
-
-# Dvi is a bytecode format documented in
-# https://ctan.org/pkg/dvitype
-# https://texdoc.org/serve/dvitype.pdf/0
-#
-# The file consists of a preamble, some number of pages, a postamble,
-# and a finale. Different opcodes are allowed in different contexts,
-# so the Dvi object has a parser state:
-#
-# pre: expecting the preamble
-# outer: between pages (followed by a page or the postamble,
-# also e.g. font definitions are allowed)
-# page: processing a page
-# post_post: state after the postamble (our current implementation
-# just stops reading)
-# finale: the finale (unimplemented in our current implementation)
-
-_dvistate = enum.Enum('DviState', 'pre outer inpage post_post finale')
-
-# The marks on a page consist of text and boxes. A page also has dimensions.
-Page = namedtuple('Page', 'text boxes height width descent')
-Box = namedtuple('Box', 'x y height width')
-
-
-# Also a namedtuple, for backcompat.
-class Text(namedtuple('Text', 'x y font glyph width')):
- """
- A glyph in the dvi file.
-
- The *x* and *y* attributes directly position the glyph. The *font*,
- *glyph*, and *width* attributes are kept public for back-compatibility,
- but users wanting to draw the glyph themselves are encouraged to instead
- load the font specified by `font_path` at `font_size`, warp it with the
- effects specified by `font_effects`, and load the glyph specified by
- `glyph_name_or_index`.
- """
-
- def _get_pdftexmap_entry(self):
- return PsfontsMap(find_tex_file("pdftex.map"))[self.font.texname]
-
- @property
- def font_path(self):
- """The `~pathlib.Path` to the font for this glyph."""
- psfont = self._get_pdftexmap_entry()
- if psfont.filename is None:
- raise ValueError("No usable font file found for {} ({}); "
- "the font may lack a Type-1 version"
- .format(psfont.psname.decode("ascii"),
- psfont.texname.decode("ascii")))
- return Path(psfont.filename)
-
- @property
- def font_size(self):
- """The font size."""
- return self.font.size
-
- @property
- def font_effects(self):
- """
- The "font effects" dict for this glyph.
-
- This dict contains the values for this glyph of SlantFont and
- ExtendFont (if any), read off :file:`pdftex.map`.
- """
- return self._get_pdftexmap_entry().effects
-
- @property
- def glyph_name_or_index(self):
- """
- Either the glyph name or the native charmap glyph index.
-
- If :file:`pdftex.map` specifies an encoding for this glyph's font, that
- is a mapping of glyph indices to Adobe glyph names; use it to convert
- dvi indices to glyph names. Callers can then convert glyph names to
- glyph indices (with FT_Get_Name_Index/get_name_index), and load the
- glyph using FT_Load_Glyph/load_glyph.
-
- If :file:`pdftex.map` specifies no encoding, the indices directly map
- to the font's "native" charmap; glyphs should directly load using
- FT_Load_Char/load_char after selecting the native charmap.
- """
- entry = self._get_pdftexmap_entry()
- return (_parse_enc(entry.encoding)[self.glyph]
- if entry.encoding is not None else self.glyph)
-
-
-# Opcode argument parsing
-#
-# Each of the following functions takes a Dvi object and delta,
-# which is the difference between the opcode and the minimum opcode
-# with the same meaning. Dvi opcodes often encode the number of
-# argument bytes in this delta.
-
-def _arg_raw(dvi, delta):
- """Return *delta* without reading anything more from the dvi file."""
- return delta
-
-
-def _arg(nbytes, signed, dvi, _):
- """
- Read *nbytes* bytes, returning the bytes interpreted as a signed integer
- if *signed* is true, unsigned otherwise.
- """
- return dvi._arg(nbytes, signed)
-
-
-def _arg_slen(dvi, delta):
- """
- Read *delta* bytes, returning None if *delta* is zero, and the bytes
- interpreted as a signed integer otherwise.
- """
- if delta == 0:
- return None
- return dvi._arg(delta, True)
-
-
-def _arg_slen1(dvi, delta):
- """
- Read *delta*+1 bytes, returning the bytes interpreted as signed.
- """
- return dvi._arg(delta + 1, True)
-
-
-def _arg_ulen1(dvi, delta):
- """
- Read *delta*+1 bytes, returning the bytes interpreted as unsigned.
- """
- return dvi._arg(delta + 1, False)
-
-
-def _arg_olen1(dvi, delta):
- """
- Read *delta*+1 bytes, returning the bytes interpreted as
- unsigned integer for 0<=*delta*<3 and signed if *delta*==3.
- """
- return dvi._arg(delta + 1, delta == 3)
-
-
-_arg_mapping = dict(raw=_arg_raw,
- u1=partial(_arg, 1, False),
- u4=partial(_arg, 4, False),
- s4=partial(_arg, 4, True),
- slen=_arg_slen,
- olen1=_arg_olen1,
- slen1=_arg_slen1,
- ulen1=_arg_ulen1)
-
-
-def _dispatch(table, min, max=None, state=None, args=('raw',)):
- """
- Decorator for dispatch by opcode. Sets the values in *table*
- from *min* to *max* to this method, adds a check that the Dvi state
- matches *state* if not None, reads arguments from the file according
- to *args*.
-
- Parameters
- ----------
- table : dict[int, callable]
- The dispatch table to be filled in.
-
- min, max : int
- Range of opcodes that calls the registered function; *max* defaults to
- *min*.
-
- state : _dvistate, optional
- State of the Dvi object in which these opcodes are allowed.
-
- args : list[str], default: ['raw']
- Sequence of argument specifications:
-
- - 'raw': opcode minus minimum
- - 'u1': read one unsigned byte
- - 'u4': read four bytes, treat as an unsigned number
- - 's4': read four bytes, treat as a signed number
- - 'slen': read (opcode - minimum) bytes, treat as signed
- - 'slen1': read (opcode - minimum + 1) bytes, treat as signed
- - 'ulen1': read (opcode - minimum + 1) bytes, treat as unsigned
- - 'olen1': read (opcode - minimum + 1) bytes, treat as unsigned
- if under four bytes, signed if four bytes
- """
- def decorate(method):
- get_args = [_arg_mapping[x] for x in args]
-
- @wraps(method)
- def wrapper(self, byte):
- if state is not None and self.state != state:
- raise ValueError("state precondition failed")
- return method(self, *[f(self, byte-min) for f in get_args])
- if max is None:
- table[min] = wrapper
- else:
- for i in range(min, max+1):
- assert table[i] is None
- table[i] = wrapper
- return wrapper
- return decorate
-
-
-class Dvi:
- """
- A reader for a dvi ("device-independent") file, as produced by TeX.
-
- The current implementation can only iterate through pages in order,
- and does not even attempt to verify the postamble.
-
- This class can be used as a context manager to close the underlying
- file upon exit. Pages can be read via iteration. Here is an overly
- simple way to extract text without trying to detect whitespace::
-
- >>> with matplotlib.dviread.Dvi('input.dvi', 72) as dvi:
- ... for page in dvi:
- ... print(''.join(chr(t.glyph) for t in page.text))
- """
- # dispatch table
- _dtable = [None] * 256
- _dispatch = partial(_dispatch, _dtable)
-
- def __init__(self, filename, dpi):
- """
- Read the data from the file named *filename* and convert
- TeX's internal units to units of *dpi* per inch.
- *dpi* only sets the units and does not limit the resolution.
- Use None to return TeX's internal units.
- """
- _log.debug('Dvi: %s', filename)
- self.file = open(filename, 'rb')
- self.dpi = dpi
- self.fonts = {}
- self.state = _dvistate.pre
-
- def __enter__(self):
- """Context manager enter method, does nothing."""
- return self
-
- def __exit__(self, etype, evalue, etrace):
- """
- Context manager exit method, closes the underlying file if it is open.
- """
- self.close()
-
- def __iter__(self):
- """
- Iterate through the pages of the file.
-
- Yields
- ------
- Page
- Details of all the text and box objects on the page.
- The Page tuple contains lists of Text and Box tuples and
- the page dimensions, and the Text and Box tuples contain
- coordinates transformed into a standard Cartesian
- coordinate system at the dpi value given when initializing.
- The coordinates are floating point numbers, but otherwise
- precision is not lost and coordinate values are not clipped to
- integers.
- """
- while self._read():
- yield self._output()
-
- def close(self):
- """Close the underlying file if it is open."""
- if not self.file.closed:
- self.file.close()
-
- def _output(self):
- """
- Output the text and boxes belonging to the most recent page.
- page = dvi._output()
- """
- minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
- maxy_pure = -np.inf
- for elt in self.text + self.boxes:
- if isinstance(elt, Box):
- x, y, h, w = elt
- e = 0 # zero depth
- else: # glyph
- x, y, font, g, w = elt
- h, e = font._height_depth_of(g)
- minx = min(minx, x)
- miny = min(miny, y - h)
- maxx = max(maxx, x + w)
- maxy = max(maxy, y + e)
- maxy_pure = max(maxy_pure, y)
- if self._baseline_v is not None:
- maxy_pure = self._baseline_v # This should normally be the case.
- self._baseline_v = None
-
- if not self.text and not self.boxes: # Avoid infs/nans from inf+/-inf.
- return Page(text=[], boxes=[], width=0, height=0, descent=0)
-
- if self.dpi is None:
- # special case for ease of debugging: output raw dvi coordinates
- return Page(text=self.text, boxes=self.boxes,
- width=maxx-minx, height=maxy_pure-miny,
- descent=maxy-maxy_pure)
-
- # convert from TeX's "scaled points" to dpi units
- d = self.dpi / (72.27 * 2**16)
- descent = (maxy - maxy_pure) * d
-
- text = [Text((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
- for (x, y, f, g, w) in self.text]
- boxes = [Box((x-minx)*d, (maxy-y)*d - descent, h*d, w*d)
- for (x, y, h, w) in self.boxes]
-
- return Page(text=text, boxes=boxes, width=(maxx-minx)*d,
- height=(maxy_pure-miny)*d, descent=descent)
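-
-    # Worked example (illustrative, not part of the original source): with
-    # dpi=72 the factor is d = 72 / (72.27 * 2**16), so one TeX point
-    # (2**16 "scaled points") maps to 65536 * d = 72 / 72.27 ≈ 0.9963 output
-    # units, i.e. a TeX point is slightly smaller than a PostScript point.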
-
- def _read(self):
- """
- Read one page from the file. Return True if successful,
- False if there were no more pages.
- """
- # Pages appear to start with the sequence
- # bop (begin of page)
- # xxx comment
- # # if using chemformula
- # down
- # push
- # down
- # # if using xcolor
- # down
- # push
- # down (possibly multiple)
- # push <= here, v is the baseline position.
- # etc.
- # (dviasm is useful to explore this structure.)
- # Thus, we use the vertical position at the first time the stack depth
- # reaches 3, while at least three "downs" have been executed (excluding
- # those popped out (corresponding to the chemformula preamble)), as the
- # baseline (the "down" count is necessary to handle xcolor).
- down_stack = [0]
- self._baseline_v = None
- while True:
- byte = self.file.read(1)[0]
- self._dtable[byte](self, byte)
- name = self._dtable[byte].__name__
- if name == "_push":
- down_stack.append(down_stack[-1])
- elif name == "_pop":
- down_stack.pop()
- elif name == "_down":
- down_stack[-1] += 1
- if (self._baseline_v is None
- and len(getattr(self, "stack", [])) == 3
- and down_stack[-1] >= 4):
- self._baseline_v = self.v
- if byte == 140: # end of page
- return True
- if self.state is _dvistate.post_post: # end of file
- self.close()
- return False
-
- def _arg(self, nbytes, signed=False):
- """
- Read and return an integer argument *nbytes* long.
- Signedness is determined by the *signed* keyword.
- """
- buf = self.file.read(nbytes)
- value = buf[0]
- if signed and value >= 0x80:
- value = value - 0x100
- for b in buf[1:]:
- value = 0x100*value + b
- return value
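-
-    # Equivalent formulation (illustrative, not part of the original source):
-    # the loop above performs big-endian decoding, i.e. it matches
-    #     int.from_bytes(buf, "big", signed=signed)
-    # for example, buf = b"\xff\x00" with signed=True yields -256.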
-
- @_dispatch(min=0, max=127, state=_dvistate.inpage)
- def _set_char_immediate(self, char):
- self._put_char_real(char)
- self.h += self.fonts[self.f]._width_of(char)
-
- @_dispatch(min=128, max=131, state=_dvistate.inpage, args=('olen1',))
- def _set_char(self, char):
- self._put_char_real(char)
- self.h += self.fonts[self.f]._width_of(char)
-
- @_dispatch(132, state=_dvistate.inpage, args=('s4', 's4'))
- def _set_rule(self, a, b):
- self._put_rule_real(a, b)
- self.h += b
-
- @_dispatch(min=133, max=136, state=_dvistate.inpage, args=('olen1',))
- def _put_char(self, char):
- self._put_char_real(char)
-
- def _put_char_real(self, char):
- font = self.fonts[self.f]
- if font._vf is None:
- self.text.append(Text(self.h, self.v, font, char,
- font._width_of(char)))
- else:
- scale = font._scale
- for x, y, f, g, w in font._vf[char].text:
- newf = DviFont(scale=_mul2012(scale, f._scale),
- tfm=f._tfm, texname=f.texname, vf=f._vf)
- self.text.append(Text(self.h + _mul2012(x, scale),
- self.v + _mul2012(y, scale),
- newf, g, newf._width_of(g)))
- self.boxes.extend([Box(self.h + _mul2012(x, scale),
- self.v + _mul2012(y, scale),
- _mul2012(a, scale), _mul2012(b, scale))
- for x, y, a, b in font._vf[char].boxes])
-
- @_dispatch(137, state=_dvistate.inpage, args=('s4', 's4'))
- def _put_rule(self, a, b):
- self._put_rule_real(a, b)
-
- def _put_rule_real(self, a, b):
- if a > 0 and b > 0:
- self.boxes.append(Box(self.h, self.v, a, b))
-
- @_dispatch(138)
- def _nop(self, _):
- pass
-
- @_dispatch(139, state=_dvistate.outer, args=('s4',)*11)
- def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
- self.state = _dvistate.inpage
- self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
- self.stack = []
- self.text = [] # list of Text objects
- self.boxes = [] # list of Box objects
-
- @_dispatch(140, state=_dvistate.inpage)
- def _eop(self, _):
- self.state = _dvistate.outer
- del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
-
- @_dispatch(141, state=_dvistate.inpage)
- def _push(self, _):
- self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
-
- @_dispatch(142, state=_dvistate.inpage)
- def _pop(self, _):
- self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
-
- @_dispatch(min=143, max=146, state=_dvistate.inpage, args=('slen1',))
- def _right(self, b):
- self.h += b
-
- @_dispatch(min=147, max=151, state=_dvistate.inpage, args=('slen',))
- def _right_w(self, new_w):
- if new_w is not None:
- self.w = new_w
- self.h += self.w
-
- @_dispatch(min=152, max=156, state=_dvistate.inpage, args=('slen',))
- def _right_x(self, new_x):
- if new_x is not None:
- self.x = new_x
- self.h += self.x
-
- @_dispatch(min=157, max=160, state=_dvistate.inpage, args=('slen1',))
- def _down(self, a):
- self.v += a
-
- @_dispatch(min=161, max=165, state=_dvistate.inpage, args=('slen',))
- def _down_y(self, new_y):
- if new_y is not None:
- self.y = new_y
- self.v += self.y
-
- @_dispatch(min=166, max=170, state=_dvistate.inpage, args=('slen',))
- def _down_z(self, new_z):
- if new_z is not None:
- self.z = new_z
- self.v += self.z
-
- @_dispatch(min=171, max=234, state=_dvistate.inpage)
- def _fnt_num_immediate(self, k):
- self.f = k
-
- @_dispatch(min=235, max=238, state=_dvistate.inpage, args=('olen1',))
- def _fnt_num(self, new_f):
- self.f = new_f
-
- @_dispatch(min=239, max=242, args=('ulen1',))
- def _xxx(self, datalen):
- special = self.file.read(datalen)
- _log.debug(
- 'Dvi._xxx: encountered special: %s',
- ''.join([chr(ch) if 32 <= ch < 127 else '<%02x>' % ch
- for ch in special]))
-
- @_dispatch(min=243, max=246, args=('olen1', 'u4', 'u4', 'u4', 'u1', 'u1'))
- def _fnt_def(self, k, c, s, d, a, l):
- self._fnt_def_real(k, c, s, d, a, l)
-
- def _fnt_def_real(self, k, c, s, d, a, l):
- n = self.file.read(a + l)
- fontname = n[-l:].decode('ascii')
- tfm = _tfmfile(fontname)
- if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
- raise ValueError('tfm checksum mismatch: %s' % n)
- try:
- vf = _vffile(fontname)
- except FileNotFoundError:
- vf = None
- self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
-
- @_dispatch(247, state=_dvistate.pre, args=('u1', 'u4', 'u4', 'u4', 'u1'))
- def _pre(self, i, num, den, mag, k):
- self.file.read(k) # comment in the dvi file
- if i != 2:
- raise ValueError("Unknown dvi format %d" % i)
- if num != 25400000 or den != 7227 * 2**16:
- raise ValueError("Nonstandard units in dvi file")
- # meaning: TeX always uses those exact values, so it
- # should be enough for us to support those
- # (There are 72.27 pt to an inch so 7227 pt =
- # 7227 * 2**16 sp to 100 in. The numerator is multiplied
- # by 10^5 to get units of 10**-7 meters.)
- if mag != 1000:
- raise ValueError("Nonstandard magnification in dvi file")
- # meaning: LaTeX seems to frown on setting \mag, so
- # I think we can assume this is constant
- self.state = _dvistate.outer
-
- @_dispatch(248, state=_dvistate.outer)
- def _post(self, _):
- self.state = _dvistate.post_post
- # TODO: actually read the postamble and finale?
- # currently post_post just triggers closing the file
-
- @_dispatch(249)
- def _post_post(self, _):
- raise NotImplementedError
-
- @_dispatch(min=250, max=255)
- def _malformed(self, offset):
- raise ValueError(f"unknown command: byte {250 + offset}")
-
-
-class DviFont:
- """
- Encapsulation of a font that a DVI file can refer to.
-
- This class holds a font's texname and size, supports comparison,
- and knows the widths of glyphs in the same units as the AFM file.
- There are also internal attributes (for use by dviread.py) that
- are *not* used for comparison.
-
- The size is in Adobe points (converted from TeX points).
-
- Parameters
- ----------
- scale : float
- Factor by which the font is scaled from its natural size.
- tfm : Tfm
- TeX font metrics for this font
- texname : bytes
- Name of the font as used internally by TeX and friends, as an ASCII
- bytestring. This is usually very different from any external font
- names; `PsfontsMap` can be used to find the external name of the font.
- vf : Vf
- A TeX "virtual font" file, or None if this font is not virtual.
-
- Attributes
- ----------
- texname : bytes
- size : float
- Size of the font in Adobe points, converted from the slightly
- smaller TeX points.
- widths : list
- Widths of glyphs in glyph-space units, typically 1/1000ths of
- the point size.
-
- """
- __slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
-
- def __init__(self, scale, tfm, texname, vf):
- _api.check_isinstance(bytes, texname=texname)
- self._scale = scale
- self._tfm = tfm
- self.texname = texname
- self._vf = vf
- self.size = scale * (72.0 / (72.27 * 2**16))
- try:
- nchars = max(tfm.width) + 1
- except ValueError:
- nchars = 0
- self.widths = [(1000*tfm.width.get(char, 0)) >> 20
- for char in range(nchars)]
-
- def __eq__(self, other):
- return (type(self) is type(other)
- and self.texname == other.texname and self.size == other.size)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __repr__(self):
- return f"<{type(self).__name__}: {self.texname}>"
-
- def _width_of(self, char):
- """Width of char in dvi units."""
- width = self._tfm.width.get(char, None)
- if width is not None:
- return _mul2012(width, self._scale)
- _log.debug('No width for char %d in font %s.', char, self.texname)
- return 0
-
- def _height_depth_of(self, char):
- """Height and depth of char in dvi units."""
- result = []
- for metric, name in ((self._tfm.height, "height"),
- (self._tfm.depth, "depth")):
- value = metric.get(char, None)
- if value is None:
- _log.debug('No %s for char %d in font %s',
- name, char, self.texname)
- result.append(0)
- else:
- result.append(_mul2012(value, self._scale))
- # cmsyXX (symbols font) glyph 0 ("minus") has a nonzero descent
- # so that TeX aligns equations properly
- # (https://tex.stackexchange.com/q/526103/)
- # but we actually care about the rasterization depth to align
- # the dvipng-generated images.
- if re.match(br'^cmsy\d+$', self.texname) and char == 0:
- result[-1] = 0
- return result
-
-
-class Vf(Dvi):
- r"""
- A virtual font (\*.vf file) containing subroutines for dvi files.
-
- Parameters
- ----------
- filename : str or path-like
-
- Notes
- -----
- The virtual font format is a derivative of dvi:
- http://mirrors.ctan.org/info/knuth/virtual-fonts
- This class reuses some of the machinery of `Dvi`
- but replaces the `_read` loop and dispatch mechanism.
-
- Examples
- --------
- ::
-
- vf = Vf(filename)
- glyph = vf[code]
- glyph.text, glyph.boxes, glyph.width
- """
-
- def __init__(self, filename):
- super().__init__(filename, 0)
- try:
- self._first_font = None
- self._chars = {}
- self._read()
- finally:
- self.close()
-
- def __getitem__(self, code):
- return self._chars[code]
-
- def _read(self):
- """
- Read one page from the file. Return True if successful,
- False if there were no more pages.
- """
- packet_char, packet_ends = None, None
- packet_len, packet_width = None, None
- while True:
- byte = self.file.read(1)[0]
- # If we are in a packet, execute the dvi instructions
- if self.state is _dvistate.inpage:
- byte_at = self.file.tell()-1
- if byte_at == packet_ends:
- self._finalize_packet(packet_char, packet_width)
- packet_len, packet_char, packet_width = None, None, None
- # fall through to out-of-packet code
- elif byte_at > packet_ends:
- raise ValueError("Packet length mismatch in vf file")
- else:
- if byte in (139, 140) or byte >= 243:
- raise ValueError(
- "Inappropriate opcode %d in vf file" % byte)
- Dvi._dtable[byte](self, byte)
- continue
-
- # We are outside a packet
- if byte < 242: # a short packet (length given by byte)
- packet_len = byte
- packet_char, packet_width = self._arg(1), self._arg(3)
- packet_ends = self._init_packet(byte)
- self.state = _dvistate.inpage
- elif byte == 242: # a long packet
- packet_len, packet_char, packet_width = \
- [self._arg(x) for x in (4, 4, 4)]
- self._init_packet(packet_len)
- elif 243 <= byte <= 246:
- k = self._arg(byte - 242, byte == 246)
- c, s, d, a, l = [self._arg(x) for x in (4, 4, 4, 1, 1)]
- self._fnt_def_real(k, c, s, d, a, l)
- if self._first_font is None:
- self._first_font = k
- elif byte == 247: # preamble
- i, k = self._arg(1), self._arg(1)
- x = self.file.read(k)
- cs, ds = self._arg(4), self._arg(4)
- self._pre(i, x, cs, ds)
- elif byte == 248: # postamble (just some number of 248s)
- break
- else:
- raise ValueError("Unknown vf opcode %d" % byte)
-
- def _init_packet(self, pl):
- if self.state != _dvistate.outer:
- raise ValueError("Misplaced packet in vf file")
- self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
- self.stack, self.text, self.boxes = [], [], []
- self.f = self._first_font
- return self.file.tell() + pl
-
- def _finalize_packet(self, packet_char, packet_width):
- self._chars[packet_char] = Page(
- text=self.text, boxes=self.boxes, width=packet_width,
- height=None, descent=None)
- self.state = _dvistate.outer
-
- def _pre(self, i, x, cs, ds):
- if self.state is not _dvistate.pre:
- raise ValueError("pre command in middle of vf file")
- if i != 202:
- raise ValueError("Unknown vf format %d" % i)
- if len(x):
- _log.debug('vf file comment: %s', x)
- self.state = _dvistate.outer
- # cs = checksum, ds = design size
-
-
-def _mul2012(num1, num2):
- """Multiply two numbers in 20.12 fixed point format."""
- # Separated into a function because >> has surprising precedence
- return (num1*num2) >> 20
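-
-# Worked example (illustrative, not part of the original source): the shift by
-# 20 treats both operands as having 20 fractional bits, so 1.0 is stored as
-# 1 << 20 and _mul2012(3 << 20, 2 << 20) == 6 << 20, i.e. 3.0 * 2.0 == 6.0.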
-
-
-class Tfm:
- """
- A TeX Font Metric file.
-
- This implementation covers only the bare minimum needed by the Dvi class.
-
- Parameters
- ----------
- filename : str or path-like
-
- Attributes
- ----------
- checksum : int
- Used for verifying against the dvi file.
- design_size : int
- Design size of the font (unknown units)
- width, height, depth : dict
- Dimensions of each character, need to be scaled by the factor
- specified in the dvi file. These are dicts because indexing may
- not start from 0.
- """
- __slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
-
- def __init__(self, filename):
- _log.debug('opening tfm file %s', filename)
- with open(filename, 'rb') as file:
- header1 = file.read(24)
- lh, bc, ec, nw, nh, nd = struct.unpack('!6H', header1[2:14])
- _log.debug('lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d',
- lh, bc, ec, nw, nh, nd)
- header2 = file.read(4*lh)
- self.checksum, self.design_size = struct.unpack('!2I', header2[:8])
- # there is also encoding information etc.
- char_info = file.read(4*(ec-bc+1))
- widths = struct.unpack(f'!{nw}i', file.read(4*nw))
- heights = struct.unpack(f'!{nh}i', file.read(4*nh))
- depths = struct.unpack(f'!{nd}i', file.read(4*nd))
- self.width, self.height, self.depth = {}, {}, {}
- for idx, char in enumerate(range(bc, ec+1)):
- byte0 = char_info[4*idx]
- byte1 = char_info[4*idx+1]
- self.width[char] = widths[byte0]
- self.height[char] = heights[byte1 >> 4]
- self.depth[char] = depths[byte1 & 0xf]
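-
-    # Illustrative usage (not part of the original source; assumes a TeX
-    # installation providing cmr10.tfm):
-    #     tfm = Tfm(find_tex_file("cmr10.tfm"))
-    #     tfm.width[ord("A")]  # fixed-point width, later scaled via _mul2012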
-
-
-PsFont = namedtuple('PsFont', 'texname psname effects encoding filename')
-
-
-class PsfontsMap:
- """
- A psfonts.map formatted file, mapping TeX fonts to PS fonts.
-
- Parameters
- ----------
- filename : str or path-like
-
- Notes
- -----
- For historical reasons, TeX knows many Type-1 fonts by different
- names than the outside world. (For one thing, the names have to
- fit in eight characters.) Also, TeX's native fonts are not Type-1
- but Metafont, which is nontrivial to convert to PostScript except
- as a bitmap. While high-quality conversions to Type-1 format exist
- and are shipped with modern TeX distributions, we need to know
- which Type-1 fonts are the counterparts of which native fonts. For
- these reasons a mapping is needed from internal font names to font
- file names.
-
- A texmf tree typically includes mapping files called e.g.
- :file:`psfonts.map`, :file:`pdftex.map`, or :file:`dvipdfm.map`.
- The file :file:`psfonts.map` is used by :program:`dvips`,
- :file:`pdftex.map` by :program:`pdfTeX`, and :file:`dvipdfm.map`
- by :program:`dvipdfm`. :file:`psfonts.map` might avoid embedding
- the 35 PostScript fonts (i.e., have no filename for them, as in
- the Times-Bold example above), while the pdf-related files perhaps
- only avoid the "Base 14" pdf fonts. But the user may have
- configured these files differently.
-
- Examples
- --------
- >>> map = PsfontsMap(find_tex_file('pdftex.map'))
- >>> entry = map[b'ptmbo8r']
- >>> entry.texname
- b'ptmbo8r'
- >>> entry.psname
- b'Times-Bold'
- >>> entry.encoding
- '/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
- >>> entry.effects
- {'slant': 0.16700000000000001}
- >>> entry.filename
- """
- __slots__ = ('_filename', '_unparsed', '_parsed')
-
- # Create a filename -> PsfontsMap cache, so that calling
- # `PsfontsMap(filename)` with the same filename a second time immediately
- # returns the same object.
- @lru_cache
- def __new__(cls, filename):
- self = object.__new__(cls)
- self._filename = os.fsdecode(filename)
- # Some TeX distributions have enormous pdftex.map files which would
- # take hundreds of milliseconds to parse, but it is easy enough to just
- # store the unparsed lines (keyed by the first word, which is the
- # texname) and parse them on-demand.
- with open(filename, 'rb') as file:
- self._unparsed = {}
- for line in file:
- tfmname = line.split(b' ', 1)[0]
- self._unparsed.setdefault(tfmname, []).append(line)
- self._parsed = {}
- return self
-
- def __getitem__(self, texname):
- assert isinstance(texname, bytes)
- if texname in self._unparsed:
- for line in self._unparsed.pop(texname):
- if self._parse_and_cache_line(line):
- break
- try:
- return self._parsed[texname]
- except KeyError:
- raise LookupError(
- f"An associated PostScript font (required by Matplotlib) "
- f"could not be found for TeX font {texname.decode('ascii')!r} "
- f"in {self._filename!r}; this problem can often be solved by "
- f"installing a suitable PostScript font package in your TeX "
- f"package manager") from None
-
- def _parse_and_cache_line(self, line):
- """
- Parse a line in the font mapping file.
-
- The format is (partially) documented at
- http://mirrors.ctan.org/systems/doc/pdftex/manual/pdftex-a.pdf
- https://tug.org/texinfohtml/dvips.html#psfonts_002emap
- Each line can have the following fields:
-
- - tfmname (first, only required field),
- - psname (defaults to tfmname, must come immediately after tfmname if
- present),
- - fontflags (integer, must come immediately after psname if present,
- ignored by us),
- - special (SlantFont and ExtendFont, only field that is double-quoted),
- - fontfile, encodingfile (optional, prefixed by <, <<, or <[; << always
- precedes a font, <[ always precedes an encoding, < can precede either
- but then an encoding file must have extension .enc; < and << also
- request different font subsetting behaviors but we ignore that; < can
- be separated from the filename by whitespace).
-
- special, fontfile, and encodingfile can appear in any order.
- """
- # If the map file specifies multiple encodings for a font, we
- # follow pdfTeX in choosing the last one specified. Such
- # entries are probably mistakes but they have occurred.
- # https://tex.stackexchange.com/q/10826/
-
- if not line or line.startswith((b" ", b"%", b"*", b";", b"#")):
- return
- tfmname = basename = special = encodingfile = fontfile = None
- is_subsetted = is_t1 = is_truetype = False
- matches = re.finditer(br'"([^"]*)(?:"|$)|(\S+)', line)
- for match in matches:
- quoted, unquoted = match.groups()
- if unquoted:
- if unquoted.startswith(b"<<"): # font
- fontfile = unquoted[2:]
- elif unquoted.startswith(b"<["): # encoding
- encodingfile = unquoted[2:]
- elif unquoted.startswith(b"<"): # font or encoding
- word = (
- # foo
- unquoted[1:]
- # < by itself => read the next word
- or next(filter(None, next(matches).groups())))
- if word.endswith(b".enc"):
- encodingfile = word
- else:
- fontfile = word
- is_subsetted = True
- elif tfmname is None:
- tfmname = unquoted
- elif basename is None:
- basename = unquoted
- elif quoted:
- special = quoted
- effects = {}
- if special:
- words = reversed(special.split())
- for word in words:
- if word == b"SlantFont":
- effects["slant"] = float(next(words))
- elif word == b"ExtendFont":
- effects["extend"] = float(next(words))
-
- # Verify some properties of the line that would cause it to be ignored
- # otherwise.
- if fontfile is not None:
- if fontfile.endswith((b".ttf", b".ttc")):
- is_truetype = True
- elif not fontfile.endswith(b".otf"):
- is_t1 = True
- elif basename is not None:
- is_t1 = True
- if is_truetype and is_subsetted and encodingfile is None:
- return
- if not is_t1 and ("slant" in effects or "extend" in effects):
- return
- if abs(effects.get("slant", 0)) > 1:
- return
- if abs(effects.get("extend", 0)) > 2:
- return
-
- if basename is None:
- basename = tfmname
- if encodingfile is not None:
- encodingfile = find_tex_file(encodingfile)
- if fontfile is not None:
- fontfile = find_tex_file(fontfile)
- self._parsed[tfmname] = PsFont(
- texname=tfmname, psname=basename, effects=effects,
- encoding=encodingfile, filename=fontfile)
- return True
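-
-    # Illustrative sketch (not part of the original source): a map line such as
-    #     ptmbo8r Times-Bold ".167 SlantFont" <8r.enc <ptmb8a.pfb
-    # parses to PsFont(texname=b'ptmbo8r', psname=b'Times-Bold',
-    # effects={'slant': 0.167}, encoding=<resolved 8r.enc path>,
-    # filename=<resolved ptmb8a.pfb path>); fields prefixed with "<" name an
-    # encoding (*.enc) or a font file. The filenames here are placeholders.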
-
-
-def _parse_enc(path):
- r"""
- Parse a \*.enc file referenced from a psfonts.map style file.
-
- The format supported by this function is a tiny subset of PostScript.
-
- Parameters
- ----------
- path : `os.PathLike`
-
- Returns
- -------
- list
- The nth entry of the list is the PostScript glyph name of the nth
- glyph.
- """
- no_comments = re.sub("%.*", "", Path(path).read_text(encoding="ascii"))
- array = re.search(r"(?s)\[(.*)\]", no_comments).group(1)
- lines = [line for line in array.split() if line]
- if all(line.startswith("/") for line in lines):
- return [line[1:] for line in lines]
- else:
- raise ValueError(f"Failed to parse {path} as Postscript encoding")
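-
-# Illustrative sketch (not part of the original source): for an encoding file
-# containing
-#     /TestEncoding [ /grave /acute /circumflex ] def  % comment
-# _parse_enc returns ['grave', 'acute', 'circumflex']; an entry not starting
-# with "/" raises ValueError instead.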
-
-
-class _LuatexKpsewhich:
- @lru_cache # A singleton.
- def __new__(cls):
- self = object.__new__(cls)
- self._proc = self._new_proc()
- return self
-
- def _new_proc(self):
- return subprocess.Popen(
- ["luatex", "--luaonly",
- str(cbook._get_data_path("kpsewhich.lua"))],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE)
-
- def search(self, filename):
- if self._proc.poll() is not None: # Dead, restart it.
- self._proc = self._new_proc()
- self._proc.stdin.write(os.fsencode(filename) + b"\n")
- self._proc.stdin.flush()
- out = self._proc.stdout.readline().rstrip()
- return None if out == b"nil" else os.fsdecode(out)
-
-
-@lru_cache
-def find_tex_file(filename):
- """
- Find a file in the texmf tree using kpathsea_.
-
- The kpathsea library, provided by most existing TeX distributions, both
- on Unix-like systems and on Windows (MikTeX), is invoked via a long-lived
- luatex process if luatex is installed, or via kpsewhich otherwise.
-
- .. _kpathsea: https://www.tug.org/kpathsea/
-
- Parameters
- ----------
- filename : str or path-like
-
- Raises
- ------
- FileNotFoundError
- If the file is not found.
- """
-
- # we expect these to always be ascii encoded, but use utf-8
- # out of caution
- if isinstance(filename, bytes):
- filename = filename.decode('utf-8', errors='replace')
-
- try:
- lk = _LuatexKpsewhich()
- except FileNotFoundError:
- lk = None # Fallback to directly calling kpsewhich, as below.
-
- if lk:
- path = lk.search(filename)
- else:
- if sys.platform == 'win32':
- # On Windows only, kpathsea can use utf-8 for cmd args and output.
- # The `command_line_encoding` environment variable is set to force
- # it to always use utf-8 encoding. See Matplotlib issue #11848.
- kwargs = {'env': {**os.environ, 'command_line_encoding': 'utf-8'},
- 'encoding': 'utf-8'}
- else: # On POSIX, run through the equivalent of os.fsdecode().
- kwargs = {'encoding': sys.getfilesystemencoding(),
- 'errors': 'surrogateescape'}
-
- try:
- path = (cbook._check_and_log_subprocess(['kpsewhich', filename],
- _log, **kwargs)
- .rstrip('\n'))
- except (FileNotFoundError, RuntimeError):
- path = None
-
- if path:
- return path
- else:
- raise FileNotFoundError(
- f"Matplotlib's TeX implementation searched for a file named "
- f"{filename!r} in your texmf tree, but could not find it")
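-
-# Illustrative usage (not part of the original source; assumes a working TeX
-# installation):
-#     find_tex_file("pdftex.map")    # -> absolute path inside the texmf tree
-#     find_tex_file("no-such-file")  # raises FileNotFoundError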
-
-
-@lru_cache
-def _fontfile(cls, suffix, texname):
- return cls(find_tex_file(texname + suffix))
-
-
-_tfmfile = partial(_fontfile, Tfm, ".tfm")
-_vffile = partial(_fontfile, Vf, ".vf")
-
-
-if __name__ == '__main__':
- from argparse import ArgumentParser
- import itertools
-
- parser = ArgumentParser()
- parser.add_argument("filename")
- parser.add_argument("dpi", nargs="?", type=float, default=None)
- args = parser.parse_args()
- with Dvi(args.filename, args.dpi) as dvi:
- fontmap = PsfontsMap(find_tex_file('pdftex.map'))
- for page in dvi:
- print(f"=== new page === "
- f"(w: {page.width}, h: {page.height}, d: {page.descent})")
- for font, group in itertools.groupby(
- page.text, lambda text: text.font):
- print(f"font: {font.texname.decode('latin-1')!r}\t"
- f"scale: {font._scale / 2 ** 20}")
- print("x", "y", "glyph", "chr", "w", "(glyphs)", sep="\t")
- for text in group:
- print(text.x, text.y, text.glyph,
- chr(text.glyph) if chr(text.glyph).isprintable()
- else ".",
- text.width, sep="\t")
- if page.boxes:
- print("x", "y", "h", "w", "", "(boxes)", sep="\t")
- for box in page.boxes:
- print(box.x, box.y, box.height, box.width, sep="\t")
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/mplot3d/axes3d.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/mplot3d/axes3d.py
deleted file mode 100644
index a74c11f54e606ff6340b0826c5e999481175d5d9..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/mpl_toolkits/mplot3d/axes3d.py
+++ /dev/null
@@ -1,3448 +0,0 @@
-"""
-axes3d.py, original mplot3d version by John Porter
-Created: 23 Sep 2005
-
-Parts fixed by Reinier Heeres
-Minor additions by Ben Axelrod
-Significant updates and revisions by Ben Root
-
-Module containing Axes3D, an object which can plot 3D objects on a
-2D matplotlib figure.
-"""
-
-from collections import defaultdict
-import functools
-import itertools
-import math
-import textwrap
-
-import numpy as np
-
-import matplotlib as mpl
-from matplotlib import _api, cbook, _docstring, _preprocess_data
-import matplotlib.artist as martist
-import matplotlib.axes as maxes
-import matplotlib.collections as mcoll
-import matplotlib.colors as mcolors
-import matplotlib.image as mimage
-import matplotlib.lines as mlines
-import matplotlib.patches as mpatches
-import matplotlib.container as mcontainer
-import matplotlib.transforms as mtransforms
-from matplotlib.axes import Axes
-from matplotlib.axes._base import _axis_method_wrapper, _process_plot_format
-from matplotlib.transforms import Bbox
-from matplotlib.tri._triangulation import Triangulation
-
-from . import art3d
-from . import proj3d
-from . import axis3d
-
-
-@_docstring.interpd
-@_api.define_aliases({
- "xlim": ["xlim3d"], "ylim": ["ylim3d"], "zlim": ["zlim3d"]})
-class Axes3D(Axes):
- """
- 3D Axes object.
-
- .. note::
-
- As a user, you do not instantiate Axes directly, but use Axes creation
- methods instead; e.g. from `.pyplot` or `.Figure`:
- `~.pyplot.subplots`, `~.pyplot.subplot_mosaic` or `.Figure.add_axes`.
- """
- name = '3d'
-
- _axis_names = ("x", "y", "z")
- Axes._shared_axes["z"] = cbook.Grouper()
- Axes._shared_axes["view"] = cbook.Grouper()
-
- vvec = _api.deprecate_privatize_attribute("3.7")
- eye = _api.deprecate_privatize_attribute("3.7")
- sx = _api.deprecate_privatize_attribute("3.7")
- sy = _api.deprecate_privatize_attribute("3.7")
-
- def __init__(
- self, fig, rect=None, *args,
- elev=30, azim=-60, roll=0, sharez=None, proj_type='persp',
- box_aspect=None, computed_zorder=True, focal_length=None,
- shareview=None,
- **kwargs):
- """
- Parameters
- ----------
- fig : Figure
- The parent figure.
- rect : tuple (left, bottom, width, height), default: None.
- The ``(left, bottom, width, height)`` axes position.
- elev : float, default: 30
- The elevation angle in degrees rotates the camera above and below
- the x-y plane, with a positive angle corresponding to a location
- above the plane.
- azim : float, default: -60
- The azimuthal angle in degrees rotates the camera about the z axis,
- with a positive angle corresponding to a right-handed rotation. In
- other words, a positive azimuth rotates the camera about the origin
- from its location along the +x axis towards the +y axis.
- roll : float, default: 0
- The roll angle in degrees rotates the camera about the viewing
- axis. A positive angle spins the camera clockwise, causing the
- scene to rotate counter-clockwise.
- sharez : Axes3D, optional
- Other Axes to share z-limits with.
- proj_type : {'persp', 'ortho'}
- The projection type, default 'persp'.
- box_aspect : 3-tuple of floats, default: None
- Changes the physical dimensions of the Axes3D, such that the ratio
- of the axis lengths in display units is x:y:z.
- If None, defaults to 4:4:3
- computed_zorder : bool, default: True
- If True, the draw order is computed based on the average position
- of the `.Artist`\\s along the view direction.
- Set to False if you want to manually control the order in which
- Artists are drawn on top of each other using their *zorder*
- attribute. This can be used for fine-tuning if the automatic order
- does not produce the desired result. Note however, that a manual
- zorder will only be correct for a limited view angle. If the figure
- is rotated by the user, it will look wrong from certain angles.
- focal_length : float, default: None
- For a projection type of 'persp', the focal length of the virtual
- camera. Must be > 0. If None, defaults to 1.
- For a projection type of 'ortho', must be set to either None
- or infinity (numpy.inf). If None, defaults to infinity.
- The focal length can be computed from a desired Field Of View via
- the equation: focal_length = 1/tan(FOV/2)
- shareview : Axes3D, optional
- Other Axes to share view angles with.
-
- **kwargs
- Other optional keyword arguments:
-
- %(Axes3D:kwdoc)s
- """
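-
-        # Worked example (illustrative, not part of the original source): for a
-        # desired field of view of 90 degrees, focal_length = 1/tan(45°) = 1.0,
-        # which is the documented default for the 'persp' projection.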
-
- if rect is None:
- rect = [0.0, 0.0, 1.0, 1.0]
-
- self.initial_azim = azim
- self.initial_elev = elev
- self.initial_roll = roll
- self.set_proj_type(proj_type, focal_length)
- self.computed_zorder = computed_zorder
-
- self.xy_viewLim = Bbox.unit()
- self.zz_viewLim = Bbox.unit()
- self.xy_dataLim = Bbox.unit()
-        # z-limits are encoded in the x-component of the Bbox; y is unused
- self.zz_dataLim = Bbox.unit()
-
- # inhibit autoscale_view until the axes are defined
- # they can't be defined until Axes.__init__ has been called
- self.view_init(self.initial_elev, self.initial_azim, self.initial_roll)
-
- self._sharez = sharez
- if sharez is not None:
- self._shared_axes["z"].join(self, sharez)
- self._adjustable = 'datalim'
-
- self._shareview = shareview
- if shareview is not None:
- self._shared_axes["view"].join(self, shareview)
-
- if kwargs.pop('auto_add_to_figure', False):
- raise AttributeError(
- 'auto_add_to_figure is no longer supported for Axes3D. '
- 'Use fig.add_axes(ax) instead.'
- )
-
- super().__init__(
- fig, rect, frameon=True, box_aspect=box_aspect, *args, **kwargs
- )
- # Disable drawing of axes by base class
- super().set_axis_off()
- # Enable drawing of axes by Axes3D class
- self.set_axis_on()
- self.M = None
- self.invM = None
-
- # func used to format z -- fall back on major formatters
- self.fmt_zdata = None
-
- self.mouse_init()
- self.figure.canvas.callbacks._connect_picklable(
- 'motion_notify_event', self._on_move)
- self.figure.canvas.callbacks._connect_picklable(
- 'button_press_event', self._button_press)
- self.figure.canvas.callbacks._connect_picklable(
- 'button_release_event', self._button_release)
- self.set_top_view()
-
- self.patch.set_linewidth(0)
- # Calculate the pseudo-data width and height
- pseudo_bbox = self.transLimits.inverted().transform([(0, 0), (1, 1)])
- self._pseudo_w, self._pseudo_h = pseudo_bbox[1] - pseudo_bbox[0]
-
- # mplot3d currently manages its own spines and needs these turned off
- # for bounding box calculations
- self.spines[:].set_visible(False)
-
- def set_axis_off(self):
- self._axis3don = False
- self.stale = True
-
- def set_axis_on(self):
- self._axis3don = True
- self.stale = True
-
- def convert_zunits(self, z):
- """
- For artists in an Axes, if the zaxis has units support,
- convert *z* using zaxis unit type
- """
- return self.zaxis.convert_units(z)
-
- def set_top_view(self):
- # this happens to be the right view for the viewing coordinates
- # moved up and to the left slightly to fit labels and axes
- xdwl = 0.95 / self._dist
- xdw = 0.9 / self._dist
- ydwl = 0.95 / self._dist
- ydw = 0.9 / self._dist
- # Set the viewing pane.
- self.viewLim.intervalx = (-xdwl, xdw)
- self.viewLim.intervaly = (-ydwl, ydw)
- self.stale = True
-
- def _init_axis(self):
- """Init 3D axes; overrides creation of regular X/Y axes."""
- self.xaxis = axis3d.XAxis(self)
- self.yaxis = axis3d.YAxis(self)
- self.zaxis = axis3d.ZAxis(self)
-
- def get_zaxis(self):
- """Return the ``ZAxis`` (`~.axis3d.Axis`) instance."""
- return self.zaxis
-
- get_zgridlines = _axis_method_wrapper("zaxis", "get_gridlines")
- get_zticklines = _axis_method_wrapper("zaxis", "get_ticklines")
-
- @_api.deprecated("3.7")
- def unit_cube(self, vals=None):
- return self._unit_cube(vals)
-
- def _unit_cube(self, vals=None):
- minx, maxx, miny, maxy, minz, maxz = vals or self.get_w_lims()
- return [(minx, miny, minz),
- (maxx, miny, minz),
- (maxx, maxy, minz),
- (minx, maxy, minz),
- (minx, miny, maxz),
- (maxx, miny, maxz),
- (maxx, maxy, maxz),
- (minx, maxy, maxz)]
-
- @_api.deprecated("3.7")
- def tunit_cube(self, vals=None, M=None):
- return self._tunit_cube(vals, M)
-
- def _tunit_cube(self, vals=None, M=None):
- if M is None:
- M = self.M
- xyzs = self._unit_cube(vals)
- tcube = proj3d._proj_points(xyzs, M)
- return tcube
-
- @_api.deprecated("3.7")
- def tunit_edges(self, vals=None, M=None):
- return self._tunit_edges(vals, M)
-
- def _tunit_edges(self, vals=None, M=None):
- tc = self._tunit_cube(vals, M)
- edges = [(tc[0], tc[1]),
- (tc[1], tc[2]),
- (tc[2], tc[3]),
- (tc[3], tc[0]),
-
- (tc[0], tc[4]),
- (tc[1], tc[5]),
- (tc[2], tc[6]),
- (tc[3], tc[7]),
-
- (tc[4], tc[5]),
- (tc[5], tc[6]),
- (tc[6], tc[7]),
- (tc[7], tc[4])]
- return edges
-
- def set_aspect(self, aspect, adjustable=None, anchor=None, share=False):
- """
- Set the aspect ratios.
-
- Parameters
- ----------
- aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'}
- Possible values:
-
- ========= ==================================================
- value description
- ========= ==================================================
- 'auto' automatic; fill the position rectangle with data.
- 'equal' adapt all the axes to have equal aspect ratios.
- 'equalxy' adapt the x and y axes to have equal aspect ratios.
- 'equalxz' adapt the x and z axes to have equal aspect ratios.
- 'equalyz' adapt the y and z axes to have equal aspect ratios.
- ========= ==================================================
-
- adjustable : None or {'box', 'datalim'}, optional
- If not *None*, this defines which parameter will be adjusted to
- meet the required aspect. See `.set_adjustable` for further
- details.
-
- anchor : None or str or 2-tuple of float, optional
- If not *None*, this defines where the Axes will be drawn if there
- is extra space due to aspect constraints. The most common way to
- specify the anchor are abbreviations of cardinal directions:
-
- ===== =====================
- value description
- ===== =====================
- 'C' centered
- 'SW' lower left corner
- 'S' middle of bottom edge
- 'SE' lower right corner
- etc.
- ===== =====================
-
- See `~.Axes.set_anchor` for further details.
-
- share : bool, default: False
- If ``True``, apply the settings to all shared Axes.
-
- See Also
- --------
- mpl_toolkits.mplot3d.axes3d.Axes3D.set_box_aspect
- """
- _api.check_in_list(('auto', 'equal', 'equalxy', 'equalyz', 'equalxz'),
- aspect=aspect)
- super().set_aspect(
- aspect='auto', adjustable=adjustable, anchor=anchor, share=share)
- self._aspect = aspect
-
- if aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):
- ax_indices = self._equal_aspect_axis_indices(aspect)
-
- view_intervals = np.array([self.xaxis.get_view_interval(),
- self.yaxis.get_view_interval(),
- self.zaxis.get_view_interval()])
- ptp = np.ptp(view_intervals, axis=1)
- if self._adjustable == 'datalim':
- mean = np.mean(view_intervals, axis=1)
- scale = max(ptp[ax_indices] / self._box_aspect[ax_indices])
- deltas = scale * self._box_aspect
-
- for i, set_lim in enumerate((self.set_xlim3d,
- self.set_ylim3d,
- self.set_zlim3d)):
- if i in ax_indices:
- set_lim(mean[i] - deltas[i]/2., mean[i] + deltas[i]/2.)
- else: # 'box'
- # Change the box aspect such that the ratio of the length of
- # the unmodified axis to the length of the diagonal
- # perpendicular to it remains unchanged.
- box_aspect = np.array(self._box_aspect)
- box_aspect[ax_indices] = ptp[ax_indices]
- remaining_ax_indices = {0, 1, 2}.difference(ax_indices)
- if remaining_ax_indices:
- remaining = remaining_ax_indices.pop()
- old_diag = np.linalg.norm(self._box_aspect[ax_indices])
- new_diag = np.linalg.norm(box_aspect[ax_indices])
- box_aspect[remaining] *= new_diag / old_diag
- self.set_box_aspect(box_aspect)
-
- def _equal_aspect_axis_indices(self, aspect):
- """
- Get the indices for which of the x, y, z axes are constrained to have
- equal aspect ratios.
-
- Parameters
- ----------
- aspect : {'auto', 'equal', 'equalxy', 'equalxz', 'equalyz'}
- See descriptions in docstring for `.set_aspect()`.
- """
- ax_indices = [] # aspect == 'auto'
- if aspect == 'equal':
- ax_indices = [0, 1, 2]
- elif aspect == 'equalxy':
- ax_indices = [0, 1]
- elif aspect == 'equalxz':
- ax_indices = [0, 2]
- elif aspect == 'equalyz':
- ax_indices = [1, 2]
- return ax_indices
-
- def set_box_aspect(self, aspect, *, zoom=1):
- """
- Set the Axes box aspect.
-
- The box aspect is the ratio of height to width in display
- units for each face of the box when viewed perpendicular to
- that face. This is not to be confused with the data aspect (see
- `~.Axes3D.set_aspect`). The default ratios are 4:4:3 (x:y:z).
-
- To simulate having equal aspect in data space, set the box
- aspect to match your data range in each dimension.
-
- *zoom* controls the overall size of the Axes3D in the figure.
-
- Parameters
- ----------
- aspect : 3-tuple of floats or None
- Changes the physical dimensions of the Axes3D, such that the ratio
- of the axis lengths in display units is x:y:z.
- If None, defaults to (4, 4, 3).
-
- zoom : float, default: 1
- Control overall size of the Axes3D in the figure. Must be > 0.
- """
- if zoom <= 0:
- raise ValueError(f'Argument zoom = {zoom} must be > 0')
-
- if aspect is None:
- aspect = np.asarray((4, 4, 3), dtype=float)
- else:
- aspect = np.asarray(aspect, dtype=float)
- _api.check_shape((3,), aspect=aspect)
- # default scale tuned to match the mpl32 appearance.
- aspect *= 1.8294640721620434 * zoom / np.linalg.norm(aspect)
-
- self._box_aspect = aspect
- self.stale = True
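-
-    # Illustrative usage (not part of the original source): to emulate equal
-    # aspect in data space, pass the per-axis data ranges, e.g.
-    #     ax.set_box_aspect((np.ptp(xs), np.ptp(ys), np.ptp(zs)))
-    # or shrink the whole axes via the zoom keyword:
-    #     ax.set_box_aspect(None, zoom=0.9)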
-
- def apply_aspect(self, position=None):
- if position is None:
- position = self.get_position(original=True)
-
- # in the superclass, we would go through and actually deal with axis
- # scales and box/datalim. Those are all irrelevant - all we need to do
- # is make sure our coordinate system is square.
- trans = self.get_figure().transSubfigure
- bb = mtransforms.Bbox.unit().transformed(trans)
- # this is the physical aspect of the panel (or figure):
- fig_aspect = bb.height / bb.width
-
- box_aspect = 1
- pb = position.frozen()
- pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
- self._set_position(pb1.anchored(self.get_anchor(), pb), 'active')
-
- @martist.allow_rasterization
- def draw(self, renderer):
- if not self.get_visible():
- return
- self._unstale_viewLim()
-
- # draw the background patch
- self.patch.draw(renderer)
- self._frameon = False
-
- # first, set the aspect
- # this is duplicated from `axes._base._AxesBase.draw`
- # but must be called before any of the artist are drawn as
- # it adjusts the view limits and the size of the bounding box
- # of the Axes
- locator = self.get_axes_locator()
- self.apply_aspect(locator(self, renderer) if locator else None)
-
- # add the projection matrix to the renderer
- self.M = self.get_proj()
- self.invM = np.linalg.inv(self.M)
-
- collections_and_patches = (
- artist for artist in self._children
- if isinstance(artist, (mcoll.Collection, mpatches.Patch))
- and artist.get_visible())
- if self.computed_zorder:
- # Calculate projection of collections and patches and zorder
- # them. Make sure they are drawn above the grids.
- zorder_offset = max(axis.get_zorder()
- for axis in self._axis_map.values()) + 1
- collection_zorder = patch_zorder = zorder_offset
-
- for artist in sorted(collections_and_patches,
- key=lambda artist: artist.do_3d_projection(),
- reverse=True):
- if isinstance(artist, mcoll.Collection):
- artist.zorder = collection_zorder
- collection_zorder += 1
- elif isinstance(artist, mpatches.Patch):
- artist.zorder = patch_zorder
- patch_zorder += 1
- else:
- for artist in collections_and_patches:
- artist.do_3d_projection()
-
- if self._axis3don:
- # Draw panes first
- for axis in self._axis_map.values():
- axis.draw_pane(renderer)
- # Then gridlines
- for axis in self._axis_map.values():
- axis.draw_grid(renderer)
- # Then axes, labels, text, and ticks
- for axis in self._axis_map.values():
- axis.draw(renderer)
-
- # Then rest
- super().draw(renderer)
-
- def get_axis_position(self):
- vals = self.get_w_lims()
- tc = self._tunit_cube(vals, self.M)
- xhigh = tc[1][2] > tc[2][2]
- yhigh = tc[3][2] > tc[2][2]
- zhigh = tc[0][2] > tc[2][2]
- return xhigh, yhigh, zhigh
-
- def update_datalim(self, xys, **kwargs):
- """
- Not implemented in `~mpl_toolkits.mplot3d.axes3d.Axes3D`.
- """
- pass
-
- get_autoscalez_on = _axis_method_wrapper("zaxis", "_get_autoscale_on")
- set_autoscalez_on = _axis_method_wrapper("zaxis", "_set_autoscale_on")
-
- def set_zmargin(self, m):
- """
- Set padding of Z data limits prior to autoscaling.
-
- *m* times the data interval will be added to each end of that interval
- before it is used in autoscaling. If *m* is negative, this will clip
- the data range instead of expanding it.
-
- For example, if your data is in the range [0, 2], a margin of 0.1 will
- result in a range [-0.2, 2.2]; a margin of -0.1 will result in a range
- of [0.2, 1.8].
-
- Parameters
- ----------
- m : float greater than -0.5
- """
- if m <= -0.5:
- raise ValueError("margin must be greater than -0.5")
- self._zmargin = m
- self._request_autoscale_view("z")
- self.stale = True
-
- def margins(self, *margins, x=None, y=None, z=None, tight=True):
- """
- Set or retrieve autoscaling margins.
-
- See `.Axes.margins` for full documentation. Because this function
- applies to 3D Axes, it also takes a *z* argument, and returns
- ``(xmargin, ymargin, zmargin)``.
- """
- if margins and (x is not None or y is not None or z is not None):
- raise TypeError('Cannot pass both positional and keyword '
- 'arguments for x, y, and/or z.')
- elif len(margins) == 1:
- x = y = z = margins[0]
- elif len(margins) == 3:
- x, y, z = margins
- elif margins:
- raise TypeError('Must pass a single positional argument for all '
- 'margins, or one for each margin (x, y, z).')
-
- if x is None and y is None and z is None:
- if tight is not True:
- _api.warn_external(f'ignoring tight={tight!r} in get mode')
- return self._xmargin, self._ymargin, self._zmargin
-
- if x is not None:
- self.set_xmargin(x)
- if y is not None:
- self.set_ymargin(y)
- if z is not None:
- self.set_zmargin(z)
-
- self.autoscale_view(
- tight=tight, scalex=(x is not None), scaley=(y is not None),
- scalez=(z is not None)
- )
-
- def autoscale(self, enable=True, axis='both', tight=None):
- """
- Convenience method for simple axis view autoscaling.
-
- See `.Axes.autoscale` for full documentation. Because this function
- applies to 3D Axes, *axis* can also be set to 'z', and setting *axis*
- to 'both' autoscales all three axes.
- """
- if enable is None:
- scalex = True
- scaley = True
- scalez = True
- else:
- if axis in ['x', 'both']:
- self.set_autoscalex_on(bool(enable))
- scalex = self.get_autoscalex_on()
- else:
- scalex = False
- if axis in ['y', 'both']:
- self.set_autoscaley_on(bool(enable))
- scaley = self.get_autoscaley_on()
- else:
- scaley = False
- if axis in ['z', 'both']:
- self.set_autoscalez_on(bool(enable))
- scalez = self.get_autoscalez_on()
- else:
- scalez = False
- if scalex:
- self._request_autoscale_view("x", tight=tight)
- if scaley:
- self._request_autoscale_view("y", tight=tight)
- if scalez:
- self._request_autoscale_view("z", tight=tight)
-
- def auto_scale_xyz(self, X, Y, Z=None, had_data=None):
- # This updates the bounding boxes to keep a record of the minimum
- # rectangular volume that holds the data.
- if np.shape(X) == np.shape(Y):
- self.xy_dataLim.update_from_data_xy(
- np.column_stack([np.ravel(X), np.ravel(Y)]), not had_data)
- else:
- self.xy_dataLim.update_from_data_x(X, not had_data)
- self.xy_dataLim.update_from_data_y(Y, not had_data)
- if Z is not None:
- self.zz_dataLim.update_from_data_x(Z, not had_data)
- # Let autoscale_view figure out how to use this data.
- self.autoscale_view()
-
- def autoscale_view(self, tight=None, scalex=True, scaley=True,
- scalez=True):
- """
- Autoscale the view limits using the data limits.
-
- See `.Axes.autoscale_view` for full documentation. Because this
- function applies to 3D Axes, it also takes a *scalez* argument.
- """
- # This method looks at the rectangular volume (see above)
- # of data and decides how to scale the view portal to fit it.
- if tight is None:
- _tight = self._tight
- if not _tight:
- # if image data only just use the datalim
- for artist in self._children:
- if isinstance(artist, mimage.AxesImage):
- _tight = True
- elif isinstance(artist, (mlines.Line2D, mpatches.Patch)):
- _tight = False
- break
- else:
- _tight = self._tight = bool(tight)
-
- if scalex and self.get_autoscalex_on():
- x0, x1 = self.xy_dataLim.intervalx
- xlocator = self.xaxis.get_major_locator()
- x0, x1 = xlocator.nonsingular(x0, x1)
- if self._xmargin > 0:
- delta = (x1 - x0) * self._xmargin
- x0 -= delta
- x1 += delta
- if not _tight:
- x0, x1 = xlocator.view_limits(x0, x1)
- self.set_xbound(x0, x1)
-
- if scaley and self.get_autoscaley_on():
- y0, y1 = self.xy_dataLim.intervaly
- ylocator = self.yaxis.get_major_locator()
- y0, y1 = ylocator.nonsingular(y0, y1)
- if self._ymargin > 0:
- delta = (y1 - y0) * self._ymargin
- y0 -= delta
- y1 += delta
- if not _tight:
- y0, y1 = ylocator.view_limits(y0, y1)
- self.set_ybound(y0, y1)
-
- if scalez and self.get_autoscalez_on():
- z0, z1 = self.zz_dataLim.intervalx
- zlocator = self.zaxis.get_major_locator()
- z0, z1 = zlocator.nonsingular(z0, z1)
- if self._zmargin > 0:
- delta = (z1 - z0) * self._zmargin
- z0 -= delta
- z1 += delta
- if not _tight:
- z0, z1 = zlocator.view_limits(z0, z1)
- self.set_zbound(z0, z1)
-
- def get_w_lims(self):
- """Get 3D world limits."""
- minx, maxx = self.get_xlim3d()
- miny, maxy = self.get_ylim3d()
- minz, maxz = self.get_zlim3d()
- return minx, maxx, miny, maxy, minz, maxz
-
- # set_xlim, set_ylim are directly inherited from base Axes.
- def set_zlim(self, bottom=None, top=None, *, emit=True, auto=False,
- zmin=None, zmax=None):
- """
- Set 3D z limits.
-
- See `.Axes.set_ylim` for full documentation.
- """
- if top is None and np.iterable(bottom):
- bottom, top = bottom
- if zmin is not None:
- if bottom is not None:
- raise TypeError("Cannot pass both 'bottom' and 'zmin'")
- bottom = zmin
- if zmax is not None:
- if top is not None:
- raise TypeError("Cannot pass both 'top' and 'zmax'")
- top = zmax
- return self.zaxis._set_lim(bottom, top, emit=emit, auto=auto)
-
- set_xlim3d = maxes.Axes.set_xlim
- set_ylim3d = maxes.Axes.set_ylim
- set_zlim3d = set_zlim
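-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module), assuming `ax` is a 3D Axes:
- #
- #     ax.set_zlim(-1, 1)          # explicit bottom and top
- #     ax.set_zlim(bottom=0)       # only raise the lower limit
- #     ax.set_zlim3d(0, 10)        # alias, same as set_zlim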
-
- def get_xlim(self):
- # docstring inherited
- return tuple(self.xy_viewLim.intervalx)
-
- def get_ylim(self):
- # docstring inherited
- return tuple(self.xy_viewLim.intervaly)
-
- def get_zlim(self):
- """
- Return the 3D z-axis view limits.
-
- Returns
- -------
- bottom, top : (float, float)
- The current z-axis limits in data coordinates.
-
- See Also
- --------
- set_zlim
- set_zbound, get_zbound
- invert_zaxis, zaxis_inverted
-
- Notes
- -----
- The z-axis may be inverted, in which case the *bottom* value will
- be greater than the *top* value.
- """
- return tuple(self.zz_viewLim.intervalx)
-
- get_zscale = _axis_method_wrapper("zaxis", "get_scale")
-
- # Redefine all three methods to overwrite their docstrings.
- set_xscale = _axis_method_wrapper("xaxis", "_set_axes_scale")
- set_yscale = _axis_method_wrapper("yaxis", "_set_axes_scale")
- set_zscale = _axis_method_wrapper("zaxis", "_set_axes_scale")
- set_xscale.__doc__, set_yscale.__doc__, set_zscale.__doc__ = map(
- """
- Set the {}-axis scale.
-
- Parameters
- ----------
- value : {{"linear"}}
- The axis scale type to apply. 3D axes currently only support
- linear scales; other scales yield nonsensical results.
-
- **kwargs
- Keyword arguments are nominally forwarded to the scale class, but
- none of them is applicable for linear scales.
- """.format,
- ["x", "y", "z"])
-
- get_zticks = _axis_method_wrapper("zaxis", "get_ticklocs")
- set_zticks = _axis_method_wrapper("zaxis", "set_ticks")
- get_zmajorticklabels = _axis_method_wrapper("zaxis", "get_majorticklabels")
- get_zminorticklabels = _axis_method_wrapper("zaxis", "get_minorticklabels")
- get_zticklabels = _axis_method_wrapper("zaxis", "get_ticklabels")
- set_zticklabels = _axis_method_wrapper(
- "zaxis", "set_ticklabels",
- doc_sub={"Axis.set_ticks": "Axes3D.set_zticks"})
-
- zaxis_date = _axis_method_wrapper("zaxis", "axis_date")
- if zaxis_date.__doc__:
- zaxis_date.__doc__ += textwrap.dedent("""
-
- Notes
- -----
- This function is merely provided for completeness, but 3D axes do not
- support dates for ticks, and so this may not work as expected.
- """)
-
- def clabel(self, *args, **kwargs):
- """Currently not implemented for 3D axes, and returns *None*."""
- return None
-
- def view_init(self, elev=None, azim=None, roll=None, vertical_axis="z",
- share=False):
- """
- Set the elevation and azimuth of the axes in degrees (not radians).
-
- This can be used to rotate the axes programmatically.
-
- To look normal to the primary planes, the following elevation and
- azimuth angles can be used. A roll angle of 0, 90, 180, or 270 deg
- will rotate these views while keeping the axes at right angles.
-
- ========== ==== ====
- view plane elev azim
- ========== ==== ====
- XY 90 -90
- XZ 0 -90
- YZ 0 0
- -XY -90 90
- -XZ 0 90
- -YZ 0 180
- ========== ==== ====
-
- Parameters
- ----------
- elev : float, default: None
- The elevation angle in degrees rotates the camera above the plane
- pierced by the vertical axis, with a positive angle corresponding
- to a location above that plane. For example, with the default
- vertical axis of 'z', the elevation defines the angle of the camera
- location above the x-y plane.
- If None, then the initial value as specified in the `Axes3D`
- constructor is used.
- azim : float, default: None
- The azimuthal angle in degrees rotates the camera about the
- vertical axis, with a positive angle corresponding to a
- right-handed rotation. For example, with the default vertical axis
- of 'z', a positive azimuth rotates the camera about the origin from
- its location along the +x axis towards the +y axis.
- If None, then the initial value as specified in the `Axes3D`
- constructor is used.
- roll : float, default: None
- The roll angle in degrees rotates the camera about the viewing
- axis. A positive angle spins the camera clockwise, causing the
- scene to rotate counter-clockwise.
- If None, then the initial value as specified in the `Axes3D`
- constructor is used.
- vertical_axis : {"z", "x", "y"}, default: "z"
- The axis to align vertically. *azim* rotates about this axis.
- share : bool, default: False
- If ``True``, apply the settings to all Axes with shared views.
- """
-
- self._dist = 10 # The camera distance from origin. Behaves like zoom
-
- if elev is None:
- elev = self.initial_elev
- if azim is None:
- azim = self.initial_azim
- if roll is None:
- roll = self.initial_roll
- vertical_axis = _api.check_getitem(
- dict(x=0, y=1, z=2), vertical_axis=vertical_axis
- )
-
- if share:
- axes = {sibling for sibling
- in self._shared_axes['view'].get_siblings(self)}
- else:
- axes = [self]
-
- for ax in axes:
- ax.elev = elev
- ax.azim = azim
- ax.roll = roll
- ax._vertical_axis = vertical_axis
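-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module), assuming `ax` is a 3D Axes:
- #
- #     ax.view_init(elev=30, azim=-60, roll=0)           # an oblique view
- #     ax.view_init(elev=90, azim=-90)                   # look straight down at the x-y plane
- #     ax.view_init(elev=0, azim=0, vertical_axis='y')   # make y point up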
-
- def set_proj_type(self, proj_type, focal_length=None):
- """
- Set the projection type.
-
- Parameters
- ----------
- proj_type : {'persp', 'ortho'}
- The projection type.
- focal_length : float, default: None
- For a projection type of 'persp', the focal length of the virtual
- camera. Must be > 0. If None, defaults to 1.
- The focal length can be computed from a desired Field Of View via
- the equation: focal_length = 1/tan(FOV/2)
- """
- _api.check_in_list(['persp', 'ortho'], proj_type=proj_type)
- if proj_type == 'persp':
- if focal_length is None:
- focal_length = 1
- elif focal_length <= 0:
- raise ValueError(f"focal_length = {focal_length} must be "
- "greater than 0")
- self._focal_length = focal_length
- else: # 'ortho':
- if focal_length not in (None, np.inf):
- raise ValueError(f"focal_length = {focal_length} must be "
- f"None for proj_type = {proj_type}")
- self._focal_length = np.inf
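-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module). Using focal_length = 1/tan(FOV/2), a 90 degree field of view
- # corresponds to a focal length of 1 and a wider ~120 degree FOV to about
- # 0.58; `ax` is an assumed 3D Axes and `np` is numpy:
- #
- #     ax.set_proj_type('ortho')        # orthographic
- #     ax.set_proj_type('persp')        # perspective, focal_length=1
- #     ax.set_proj_type('persp',
- #                      focal_length=1 / np.tan(np.deg2rad(120) / 2))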
-
- def _roll_to_vertical(self, arr):
- """Roll arrays to match the different vertical axis."""
- return np.roll(arr, self._vertical_axis - 2)
-
- def get_proj(self):
- """Create the projection matrix from the current viewing position."""
-
- # Transform to uniform world coordinates 0-1, 0-1, 0-1
- box_aspect = self._roll_to_vertical(self._box_aspect)
- worldM = proj3d.world_transformation(
- *self.get_xlim3d(),
- *self.get_ylim3d(),
- *self.get_zlim3d(),
- pb_aspect=box_aspect,
- )
-
- # Look into the middle of the world coordinates:
- R = 0.5 * box_aspect
-
- # elev: elevation angle in the z plane.
- # azim: azimuth angle in the xy plane.
- # Coordinates for a point that rotates around the box of data.
- # p0, p1 correspond to rotating the box only around the vertical axis.
- # p2 corresponds to rotating the box only around the horizontal axis.
- elev_rad = np.deg2rad(self.elev)
- azim_rad = np.deg2rad(self.azim)
- p0 = np.cos(elev_rad) * np.cos(azim_rad)
- p1 = np.cos(elev_rad) * np.sin(azim_rad)
- p2 = np.sin(elev_rad)
-
- # When changing the vertical axis, the coordinates change as well.
- # Roll the values to get the same behaviour as the default:
- ps = self._roll_to_vertical([p0, p1, p2])
-
- # The coordinates for the eye viewing point. The eye is looking
- # towards the middle of the box of data from a distance:
- eye = R + self._dist * ps
-
- # vvec, self._vvec and self._eye are unused, remove when deprecated
- vvec = R - eye
- self._eye = eye
- self._vvec = vvec / np.linalg.norm(vvec)
-
- # Calculate the viewing axes for the eye position
- u, v, w = self._calc_view_axes(eye)
- self._view_u = u # _view_u is towards the right of the screen
- self._view_v = v # _view_v is towards the top of the screen
- self._view_w = w # _view_w is out of the screen
-
- # Generate the view and projection transformation matrices
- if self._focal_length == np.inf:
- # Orthographic projection
- viewM = proj3d._view_transformation_uvw(u, v, w, eye)
- projM = proj3d._ortho_transformation(-self._dist, self._dist)
- else:
- # Perspective projection
- # Scale the eye dist to compensate for the focal length zoom effect
- eye_focal = R + self._dist * ps * self._focal_length
- viewM = proj3d._view_transformation_uvw(u, v, w, eye_focal)
- projM = proj3d._persp_transformation(-self._dist,
- self._dist,
- self._focal_length)
-
- # Combine all the transformation matrices to get the final projection
- M0 = np.dot(viewM, worldM)
- M = np.dot(projM, M0)
- return M
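-
- # Illustrative sketch (editor's addition, not from the upstream module):
- # the projection matrix returned here can be combined with
- # ``proj3d.proj_transform`` to find where a 3D data point lands in the
- # Axes3D's 2D data space, e.g. for placing 2D annotations; `ax` is an
- # assumed 3D Axes:
- #
- #     from mpl_toolkits.mplot3d import proj3d
- #     x2, y2, _ = proj3d.proj_transform(1.0, 1.0, 1.0, ax.get_proj())
- #     ax.annotate("corner", (x2, y2), xycoords='data')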
-
- def mouse_init(self, rotate_btn=1, pan_btn=2, zoom_btn=3):
- """
- Set the mouse buttons for 3D rotation and zooming.
-
- Parameters
- ----------
- rotate_btn : int or list of int, default: 1
- The mouse button or buttons to use for 3D rotation of the axes.
- pan_btn : int or list of int, default: 2
- The mouse button or buttons to use to pan the 3D axes.
- zoom_btn : int or list of int, default: 3
- The mouse button or buttons to use to zoom the 3D axes.
- """
- self.button_pressed = None
- # coerce scalars into array-like, then convert into
- # a regular list to avoid comparisons against None
- # which breaks in recent versions of numpy.
- self._rotate_btn = np.atleast_1d(rotate_btn).tolist()
- self._pan_btn = np.atleast_1d(pan_btn).tolist()
- self._zoom_btn = np.atleast_1d(zoom_btn).tolist()
-
- def disable_mouse_rotation(self):
- """Disable mouse buttons for 3D rotation, panning, and zooming."""
- self.mouse_init(rotate_btn=[], pan_btn=[], zoom_btn=[])
-
- def can_zoom(self):
- # doc-string inherited
- return True
-
- def can_pan(self):
- # doc-string inherited
- return True
-
- def sharez(self, other):
- """
- Share the z-axis with *other*.
-
- This is equivalent to passing ``sharez=other`` when constructing the
- Axes, and cannot be used if the z-axis is already being shared with
- another Axes.
- """
- _api.check_isinstance(Axes3D, other=other)
- if self._sharez is not None and other is not self._sharez:
- raise ValueError("z-axis is already shared")
- self._shared_axes["z"].join(self, other)
- self._sharez = other
- self.zaxis.major = other.zaxis.major # Ticker instances holding
- self.zaxis.minor = other.zaxis.minor # locator and formatter.
- z0, z1 = other.get_zlim()
- self.set_zlim(z0, z1, emit=False, auto=other.get_autoscalez_on())
- self.zaxis._scale = other.zaxis._scale
-
- def shareview(self, other):
- """
- Share the view angles with *other*.
-
- This is equivalent to passing ``shareview=other`` when
- constructing the Axes, and cannot be used if the view angles are
- already being shared with another Axes.
- """
- _api.check_isinstance(Axes3D, other=other)
- if self._shareview is not None and other is not self._shareview:
- raise ValueError("view angles are already shared")
- self._shared_axes["view"].join(self, other)
- self._shareview = other
- vertical_axis = {0: "x", 1: "y", 2: "z"}[other._vertical_axis]
- self.view_init(elev=other.elev, azim=other.azim, roll=other.roll,
- vertical_axis=vertical_axis, share=True)
-
- def clear(self):
- # docstring inherited.
- super().clear()
- if self._focal_length == np.inf:
- self._zmargin = mpl.rcParams['axes.zmargin']
- else:
- self._zmargin = 0.
- self.grid(mpl.rcParams['axes3d.grid'])
-
- def _button_press(self, event):
- if event.inaxes == self:
- self.button_pressed = event.button
- self._sx, self._sy = event.xdata, event.ydata
- toolbar = self.figure.canvas.toolbar
- if toolbar and toolbar._nav_stack() is None:
- toolbar.push_current()
-
- def _button_release(self, event):
- self.button_pressed = None
- toolbar = self.figure.canvas.toolbar
- # backend_bases.release_zoom and backend_bases.release_pan call
- # push_current, so check the navigation mode so we don't call it twice
- if toolbar and self.get_navigate_mode() is None:
- toolbar.push_current()
-
- def _get_view(self):
- # docstring inherited
- return {
- "xlim": self.get_xlim(), "autoscalex_on": self.get_autoscalex_on(),
- "ylim": self.get_ylim(), "autoscaley_on": self.get_autoscaley_on(),
- "zlim": self.get_zlim(), "autoscalez_on": self.get_autoscalez_on(),
- }, (self.elev, self.azim, self.roll)
-
- def _set_view(self, view):
- # docstring inherited
- props, (elev, azim, roll) = view
- self.set(**props)
- self.elev = elev
- self.azim = azim
- self.roll = roll
-
- def format_zdata(self, z):
- """
- Return *z* formatted as a string. This function uses the
- :attr:`fmt_zdata` attribute if it is callable, and otherwise falls
- back on the z-axis major formatter.
- """
- try:
- return self.fmt_zdata(z)
- except (AttributeError, TypeError):
- func = self.zaxis.get_major_formatter().format_data_short
- val = func(z)
- return val
-
- def format_coord(self, xv, yv, renderer=None):
- """
- Return a string giving the current view rotation angles, or the x, y, z
- coordinates of the point on the nearest axis pane underneath the mouse
- cursor, depending on the mouse button pressed.
- """
- coords = ''
-
- if self.button_pressed in self._rotate_btn:
- # ignore xv and yv and display angles instead
- coords = self._rotation_coords()
-
- elif self.M is not None:
- coords = self._location_coords(xv, yv, renderer)
-
- return coords
-
- def _rotation_coords(self):
- """
- Return the rotation angles as a string.
- """
- norm_elev = art3d._norm_angle(self.elev)
- norm_azim = art3d._norm_angle(self.azim)
- norm_roll = art3d._norm_angle(self.roll)
- coords = (f"elevation={norm_elev:.0f}\N{DEGREE SIGN}, "
- f"azimuth={norm_azim:.0f}\N{DEGREE SIGN}, "
- f"roll={norm_roll:.0f}\N{DEGREE SIGN}"
- ).replace("-", "\N{MINUS SIGN}")
- return coords
-
- def _location_coords(self, xv, yv, renderer):
- """
- Return the location on the axis pane underneath the cursor as a string.
- """
- p1, pane_idx = self._calc_coord(xv, yv, renderer)
- xs = self.format_xdata(p1[0])
- ys = self.format_ydata(p1[1])
- zs = self.format_zdata(p1[2])
- if pane_idx == 0:
- coords = f'x pane={xs}, y={ys}, z={zs}'
- elif pane_idx == 1:
- coords = f'x={xs}, y pane={ys}, z={zs}'
- elif pane_idx == 2:
- coords = f'x={xs}, y={ys}, z pane={zs}'
- return coords
-
- def _get_camera_loc(self):
- """
- Returns the current camera location in data coordinates.
- """
- cx, cy, cz, dx, dy, dz = self._get_w_centers_ranges()
- c = np.array([cx, cy, cz])
- r = np.array([dx, dy, dz])
-
- if self._focal_length == np.inf: # orthographic projection
- focal_length = 1e9 # large enough to be effectively infinite
- else: # perspective projection
- focal_length = self._focal_length
- eye = c + self._view_w * self._dist * r / self._box_aspect * focal_length
- return eye
-
- def _calc_coord(self, xv, yv, renderer=None):
- """
- Given the 2D view coordinates, find the point on the nearest axis pane
- that lies directly below those coordinates. Returns a 3D point in data
- coordinates.
- """
- if self._focal_length == np.inf: # orthographic projection
- zv = 1
- else: # perspective projection
- zv = -1 / self._focal_length
-
- # Convert point on view plane to data coordinates
- p1 = np.array(proj3d.inv_transform(xv, yv, zv, self.invM)).ravel()
-
- # Get the vector from the camera to the point on the view plane
- vec = self._get_camera_loc() - p1
-
- # Get the pane locations for each of the axes
- pane_locs = []
- for axis in self._axis_map.values():
- xys, loc = axis.active_pane(renderer)
- pane_locs.append(loc)
-
- # Find the distance to the nearest pane by projecting the view vector
- scales = np.zeros(3)
- for i in range(3):
- if vec[i] == 0:
- scales[i] = np.inf
- else:
- scales[i] = (p1[i] - pane_locs[i]) / vec[i]
- pane_idx = np.argmin(abs(scales))
- scale = scales[pane_idx]
-
- # Calculate the point on the closest pane
- p2 = p1 - scale*vec
- return p2, pane_idx
-
- def _on_move(self, event):
- """
- Mouse moving.
-
- By default, button-1 rotates, button-2 pans, and button-3 zooms;
- these buttons can be modified via `mouse_init`.
- """
-
- if not self.button_pressed:
- return
-
- if self.get_navigate_mode() is not None:
- # we don't want to rotate if we are zooming/panning
- # from the toolbar
- return
-
- if self.M is None:
- return
-
- x, y = event.xdata, event.ydata
- # In case the mouse is out of bounds.
- if x is None or event.inaxes != self:
- return
-
- dx, dy = x - self._sx, y - self._sy
- w = self._pseudo_w
- h = self._pseudo_h
-
- # Rotation
- if self.button_pressed in self._rotate_btn:
- # rotate viewing point
- # get the x and y pixel coords
- if dx == 0 and dy == 0:
- return
-
- roll = np.deg2rad(self.roll)
- delev = -(dy/h)*180*np.cos(roll) + (dx/w)*180*np.sin(roll)
- dazim = -(dy/h)*180*np.sin(roll) - (dx/w)*180*np.cos(roll)
- elev = self.elev + delev
- azim = self.azim + dazim
- self.view_init(elev=elev, azim=azim, roll=roll, share=True)
- self.stale = True
-
- # Pan
- elif self.button_pressed in self._pan_btn:
- # Start the pan event with pixel coordinates
- px, py = self.transData.transform([self._sx, self._sy])
- self.start_pan(px, py, 2)
- # pan view (takes pixel coordinate input)
- self.drag_pan(2, None, event.x, event.y)
- self.end_pan()
-
- # Zoom
- elif self.button_pressed in self._zoom_btn:
- # zoom view (dragging down zooms in)
- scale = h/(h - dy)
- self._scale_axis_limits(scale, scale, scale)
-
- # Store the event coordinates for the next time through.
- self._sx, self._sy = x, y
- # Always request a draw update at the end of interaction
- self.figure.canvas.draw_idle()
-
- def drag_pan(self, button, key, x, y):
- # docstring inherited
-
- # Get the coordinates from the move event
- p = self._pan_start
- (xdata, ydata), (xdata_start, ydata_start) = p.trans_inverse.transform(
- [(x, y), (p.x, p.y)])
- self._sx, self._sy = xdata, ydata
- # Calling start_pan() to set the x/y of this event as the starting
- # move location for the next event
- self.start_pan(x, y, button)
- du, dv = xdata - xdata_start, ydata - ydata_start
- dw = 0
- if key == 'x':
- dv = 0
- elif key == 'y':
- du = 0
- if du == 0 and dv == 0:
- return
-
- # Transform the pan from the view axes to the data axes
- R = np.array([self._view_u, self._view_v, self._view_w])
- R = -R / self._box_aspect * self._dist
- duvw_projected = R.T @ np.array([du, dv, dw])
-
- # Calculate pan distance
- minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
- dx = (maxx - minx) * duvw_projected[0]
- dy = (maxy - miny) * duvw_projected[1]
- dz = (maxz - minz) * duvw_projected[2]
-
- # Set the new axis limits
- self.set_xlim3d(minx + dx, maxx + dx)
- self.set_ylim3d(miny + dy, maxy + dy)
- self.set_zlim3d(minz + dz, maxz + dz)
-
- def _calc_view_axes(self, eye):
- """
- Get the unit vectors for the viewing axes in data coordinates.
- `u` is towards the right of the screen
- `v` is towards the top of the screen
- `w` is out of the screen
- """
- elev_rad = np.deg2rad(art3d._norm_angle(self.elev))
- roll_rad = np.deg2rad(art3d._norm_angle(self.roll))
-
- # Look into the middle of the world coordinates
- R = 0.5 * self._roll_to_vertical(self._box_aspect)
-
- # Define which axis should be vertical. A negative value
- # indicates the plot is upside down and therefore the values
- # have been reversed:
- V = np.zeros(3)
- V[self._vertical_axis] = -1 if abs(elev_rad) > np.pi/2 else 1
-
- u, v, w = proj3d._view_axes(eye, R, V, roll_rad)
- return u, v, w
-
- def _set_view_from_bbox(self, bbox, direction='in',
- mode=None, twinx=False, twiny=False):
- """
- Zoom in or out of the bounding box.
-
- Will center the view in the center of the bounding box, and zoom by
- the ratio of the size of the bounding box to the size of the Axes3D.
- """
- (start_x, start_y, stop_x, stop_y) = bbox
- if mode == 'x':
- start_y = self.bbox.min[1]
- stop_y = self.bbox.max[1]
- elif mode == 'y':
- start_x = self.bbox.min[0]
- stop_x = self.bbox.max[0]
-
- # Clip to bounding box limits
- start_x, stop_x = np.clip(sorted([start_x, stop_x]),
- self.bbox.min[0], self.bbox.max[0])
- start_y, stop_y = np.clip(sorted([start_y, stop_y]),
- self.bbox.min[1], self.bbox.max[1])
-
- # Move the center of the view to the center of the bbox
- zoom_center_x = (start_x + stop_x)/2
- zoom_center_y = (start_y + stop_y)/2
-
- ax_center_x = (self.bbox.max[0] + self.bbox.min[0])/2
- ax_center_y = (self.bbox.max[1] + self.bbox.min[1])/2
-
- self.start_pan(zoom_center_x, zoom_center_y, 2)
- self.drag_pan(2, None, ax_center_x, ax_center_y)
- self.end_pan()
-
- # Calculate zoom level
- dx = abs(start_x - stop_x)
- dy = abs(start_y - stop_y)
- scale_u = dx / (self.bbox.max[0] - self.bbox.min[0])
- scale_v = dy / (self.bbox.max[1] - self.bbox.min[1])
-
- # Keep aspect ratios equal
- scale = max(scale_u, scale_v)
-
- # Zoom out
- if direction == 'out':
- scale = 1 / scale
-
- self._zoom_data_limits(scale, scale, scale)
-
- def _zoom_data_limits(self, scale_u, scale_v, scale_w):
- """
- Zoom in or out of a 3D plot.
-
- Will scale the data limits by the scale factors. These will be
- transformed to the x, y, z data axes based on the current view angles.
- A scale factor > 1 zooms out and a scale factor < 1 zooms in.
-
- For an axes that has had its aspect ratio set to 'equal', 'equalxy',
- 'equalyz', or 'equalxz', the relevant axes are constrained to zoom
- equally.
-
- Parameters
- ----------
- scale_u : float
- Scale factor for the u view axis (view screen horizontal).
- scale_v : float
- Scale factor for the v view axis (view screen vertical).
- scale_w : float
- Scale factor for the w view axis (view screen depth).
- """
- scale = np.array([scale_u, scale_v, scale_w])
-
- # Only perform frame conversion if unequal scale factors
- if not np.allclose(scale, scale_u):
- # Convert the scale factors from the view frame to the data frame
- R = np.array([self._view_u, self._view_v, self._view_w])
- S = scale * np.eye(3)
- scale = np.linalg.norm(R.T @ S, axis=1)
-
- # Set the constrained scale factors to the factor closest to 1
- if self._aspect in ('equal', 'equalxy', 'equalxz', 'equalyz'):
- ax_idxs = self._equal_aspect_axis_indices(self._aspect)
- min_ax_idxs = np.argmin(np.abs(scale[ax_idxs] - 1))
- scale[ax_idxs] = scale[ax_idxs][min_ax_idxs]
-
- self._scale_axis_limits(scale[0], scale[1], scale[2])
-
- def _scale_axis_limits(self, scale_x, scale_y, scale_z):
- """
- Keeping the center of the x, y, and z data axes fixed, scale their
- limits by scale factors. A scale factor > 1 zooms out and a scale
- factor < 1 zooms in.
-
- Parameters
- ----------
- scale_x : float
- Scale factor for the x data axis.
- scale_y : float
- Scale factor for the y data axis.
- scale_z : float
- Scale factor for the z data axis.
- """
- # Get the axis centers and ranges
- cx, cy, cz, dx, dy, dz = self._get_w_centers_ranges()
-
- # Set the scaled axis limits
- self.set_xlim3d(cx - dx*scale_x/2, cx + dx*scale_x/2)
- self.set_ylim3d(cy - dy*scale_y/2, cy + dy*scale_y/2)
- self.set_zlim3d(cz - dz*scale_z/2, cz + dz*scale_z/2)
-
- def _get_w_centers_ranges(self):
- """Get 3D world centers and axis ranges."""
- # Calculate center of axis limits
- minx, maxx, miny, maxy, minz, maxz = self.get_w_lims()
- cx = (maxx + minx)/2
- cy = (maxy + miny)/2
- cz = (maxz + minz)/2
-
- # Calculate range of axis limits
- dx = (maxx - minx)
- dy = (maxy - miny)
- dz = (maxz - minz)
- return cx, cy, cz, dx, dy, dz
-
- def set_zlabel(self, zlabel, fontdict=None, labelpad=None, **kwargs):
- """
- Set zlabel. See doc for `.set_ylabel` for description.
- """
- if labelpad is not None:
- self.zaxis.labelpad = labelpad
- return self.zaxis.set_label_text(zlabel, fontdict, **kwargs)
-
- def get_zlabel(self):
- """
- Get the z-label text string.
- """
- label = self.zaxis.get_label()
- return label.get_text()
-
- # Axes rectangle characteristics
-
- # The frame_on methods are not available for 3D axes.
- # Python will raise a TypeError if they are called.
- get_frame_on = None
- set_frame_on = None
-
- def grid(self, visible=True, **kwargs):
- """
- Set / unset 3D grid.
-
- .. note::
-
- Currently, this function does not behave the same as
- `.axes.Axes.grid`, but it is intended to eventually support that
- behavior.
- """
- # TODO: Operate on each axes separately
- if len(kwargs):
- visible = True
- self._draw_grid = visible
- self.stale = True
-
- def tick_params(self, axis='both', **kwargs):
- """
- Convenience method for changing the appearance of ticks and
- tick labels.
-
- See `.Axes.tick_params` for full documentation. Because this function
- applies to 3D Axes, *axis* can also be set to 'z', and setting *axis*
- to 'both' applies the settings to all three axes.
-
- Also, because Axes3D objects are drawn very differently from
- regular 2D axes, some of these settings may have
- ambiguous meaning. For simplicity, the 'z' axis will
- accept settings as if it were the 'y' axis.
-
- .. note::
- Axes3D currently ignores some of these settings.
- """
- _api.check_in_list(['x', 'y', 'z', 'both'], axis=axis)
- if axis in ['x', 'y', 'both']:
- super().tick_params(axis, **kwargs)
- if axis in ['z', 'both']:
- zkw = dict(kwargs)
- zkw.pop('top', None)
- zkw.pop('bottom', None)
- zkw.pop('labeltop', None)
- zkw.pop('labelbottom', None)
- self.zaxis.set_tick_params(**zkw)
-
- # data limits, ticks, tick labels, and formatting
-
- def invert_zaxis(self):
- """
- Invert the z-axis.
-
- See Also
- --------
- zaxis_inverted
- get_zlim, set_zlim
- get_zbound, set_zbound
- """
- bottom, top = self.get_zlim()
- self.set_zlim(top, bottom, auto=None)
-
- zaxis_inverted = _axis_method_wrapper("zaxis", "get_inverted")
-
- def get_zbound(self):
- """
- Return the lower and upper z-axis bounds, in increasing order.
-
- See Also
- --------
- set_zbound
- get_zlim, set_zlim
- invert_zaxis, zaxis_inverted
- """
- bottom, top = self.get_zlim()
- if bottom < top:
- return bottom, top
- else:
- return top, bottom
-
- def set_zbound(self, lower=None, upper=None):
- """
- Set the lower and upper numerical bounds of the z-axis.
-
- This method will honor axes inversion regardless of parameter order.
- It will not change the autoscaling setting (`.get_autoscalez_on()`).
-
- Parameters
- ----------
- lower, upper : float or None
- The lower and upper bounds. If *None*, the respective axis bound
- is not modified.
-
- See Also
- --------
- get_zbound
- get_zlim, set_zlim
- invert_zaxis, zaxis_inverted
- """
- if upper is None and np.iterable(lower):
- lower, upper = lower
-
- old_lower, old_upper = self.get_zbound()
- if lower is None:
- lower = old_lower
- if upper is None:
- upper = old_upper
-
- self.set_zlim(sorted((lower, upper),
- reverse=bool(self.zaxis_inverted())),
- auto=None)
-
- def text(self, x, y, z, s, zdir=None, **kwargs):
- """
- Add the text *s* to the 3D Axes at location *x*, *y*, *z* in data coordinates.
-
- Parameters
- ----------
- x, y, z : float
- The position to place the text.
- s : str
- The text.
- zdir : {'x', 'y', 'z', 3-tuple}, optional
- The direction to be used as the z-direction. Default: 'z'.
- See `.get_dir_vector` for a description of the values.
- **kwargs
- Other arguments are forwarded to `matplotlib.axes.Axes.text`.
-
- Returns
- -------
- `.Text3D`
- The created `.Text3D` instance.
- """
- text = super().text(x, y, s, **kwargs)
- art3d.text_2d_to_3d(text, z, zdir)
- return text
-
- text3D = text
- text2D = Axes.text
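-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module), assuming `ax` is a 3D Axes:
- #
- #     ax.text(1, 1, 1, "data-space label", zdir='x')   # placed at (1, 1, 1)
- #     ax.text2D(0.05, 0.95, "axes-space note",         # pinned to the axes
- #               transform=ax.transAxes)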
-
- def plot(self, xs, ys, *args, zdir='z', **kwargs):
- """
- Plot 2D or 3D data.
-
- Parameters
- ----------
- xs : 1D array-like
- x coordinates of vertices.
- ys : 1D array-like
- y coordinates of vertices.
- zs : float or 1D array-like
- z coordinates of vertices; either one for all points or one for
- each point.
- zdir : {'x', 'y', 'z'}, default: 'z'
- When plotting 2D data, the direction to use as z.
- **kwargs
- Other arguments are forwarded to `matplotlib.axes.Axes.plot`.
- """
- had_data = self.has_data()
-
- # `zs` can be passed positionally or as keyword; checking whether
- # args[0] is a string matches the behavior of 2D `plot` (via
- # `_process_plot_var_args`).
- if args and not isinstance(args[0], str):
- zs, *args = args
- if 'zs' in kwargs:
- raise TypeError("plot() for multiple values for argument 'z'")
- else:
- zs = kwargs.pop('zs', 0)
-
- # Match length
- zs = np.broadcast_to(zs, np.shape(xs))
-
- lines = super().plot(xs, ys, *args, **kwargs)
- for line in lines:
- art3d.line_2d_to_3d(line, zs=zs, zdir=zdir)
-
- xs, ys, zs = art3d.juggle_axes(xs, ys, zs, zdir)
- self.auto_scale_xyz(xs, ys, zs, had_data)
- return lines
-
- plot3D = plot
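-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module), assuming `ax` is a 3D Axes and `np` is numpy:
- #
- #     t = np.linspace(0, 4 * np.pi, 200)
- #     ax.plot(np.cos(t), np.sin(t), t / (4 * np.pi))   # a 3D helix
- #     ax.plot(np.cos(t), np.sin(t), zs=0, zdir='z')    # 2D circle on the z=0 plane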
-
- def plot_surface(self, X, Y, Z, *, norm=None, vmin=None,
- vmax=None, lightsource=None, **kwargs):
- """
- Create a surface plot.
-
- By default, it will be colored in shades of a solid color, but it also
- supports colormapping by supplying the *cmap* argument.
-
- .. note::
-
- The *rcount* and *ccount* kwargs, which both default to 50,
- determine the maximum number of samples used in each direction. If
- the input data is larger, it will be downsampled (by slicing) to
- these numbers of points.
-
- .. note::
-
- To maximize rendering speed consider setting *rstride* and *cstride*
- to divisors of the number of rows minus 1 and columns minus 1
- respectively. For example, given 51 rows rstride can be any of the
- divisors of 50.
-
- Similarly, a setting of *rstride* and *cstride* equal to 1 (or
- *rcount* and *ccount* equal the number of rows and columns) can use
- the optimized path.
-
- Parameters
- ----------
- X, Y, Z : 2D arrays
- Data values.
-
- rcount, ccount : int
- Maximum number of samples used in each direction. If the input
- data is larger, it will be downsampled (by slicing) to these
- numbers of points. Defaults to 50.
-
- rstride, cstride : int
- Downsampling stride in each direction. These arguments are
- mutually exclusive with *rcount* and *ccount*. If only one of
- *rstride* or *cstride* is set, the other defaults to 10.
-
- 'classic' mode uses a default of ``rstride = cstride = 10`` instead
- of the new default of ``rcount = ccount = 50``.
-
- color : color-like
- Color of the surface patches.
-
- cmap : Colormap
- Colormap of the surface patches.
-
- facecolors : array-like of colors.
- Colors of each individual patch.
-
- norm : Normalize
- Normalization for the colormap.
-
- vmin, vmax : float
- Bounds for the normalization.
-
- shade : bool, default: True
- Whether to shade the facecolors. Shading is always disabled when
- *cmap* is specified.
-
- lightsource : `~matplotlib.colors.LightSource`
- The lightsource to use when *shade* is True.
-
- **kwargs
- Other keyword arguments are forwarded to `.Poly3DCollection`.
- """
-
- had_data = self.has_data()
-
- if Z.ndim != 2:
- raise ValueError("Argument Z must be 2-dimensional.")
-
- Z = cbook._to_unmasked_float_array(Z)
- X, Y, Z = np.broadcast_arrays(X, Y, Z)
- rows, cols = Z.shape
-
- has_stride = 'rstride' in kwargs or 'cstride' in kwargs
- has_count = 'rcount' in kwargs or 'ccount' in kwargs
-
- if has_stride and has_count:
- raise ValueError("Cannot specify both stride and count arguments")
-
- rstride = kwargs.pop('rstride', 10)
- cstride = kwargs.pop('cstride', 10)
- rcount = kwargs.pop('rcount', 50)
- ccount = kwargs.pop('ccount', 50)
-
- if mpl.rcParams['_internal.classic_mode']:
- # Strides have priority over counts in classic mode.
- # So, only compute strides from counts
- # if counts were explicitly given
- compute_strides = has_count
- else:
- # If the strides are provided then it has priority.
- # Otherwise, compute the strides from the counts.
- compute_strides = not has_stride
-
- if compute_strides:
- rstride = int(max(np.ceil(rows / rcount), 1))
- cstride = int(max(np.ceil(cols / ccount), 1))
-
- fcolors = kwargs.pop('facecolors', None)
-
- cmap = kwargs.get('cmap', None)
- shade = kwargs.pop('shade', cmap is None)
- if shade is None:
- raise ValueError("shade cannot be None.")
-
- colset = [] # the sampled facecolor
- if (rows - 1) % rstride == 0 and \
- (cols - 1) % cstride == 0 and \
- fcolors is None:
- polys = np.stack(
- [cbook._array_patch_perimeters(a, rstride, cstride)
- for a in (X, Y, Z)],
- axis=-1)
- else:
- # evenly spaced, and including both endpoints
- row_inds = list(range(0, rows-1, rstride)) + [rows-1]
- col_inds = list(range(0, cols-1, cstride)) + [cols-1]
-
- polys = []
- for rs, rs_next in zip(row_inds[:-1], row_inds[1:]):
- for cs, cs_next in zip(col_inds[:-1], col_inds[1:]):
- ps = [
- # +1 ensures we share edges between polygons
- cbook._array_perimeter(a[rs:rs_next+1, cs:cs_next+1])
- for a in (X, Y, Z)
- ]
- # ps = np.stack(ps, axis=-1)
- ps = np.array(ps).T
- polys.append(ps)
-
- if fcolors is not None:
- colset.append(fcolors[rs][cs])
-
- # In cases where there are non-finite values in the data (possibly NaNs from
- # masked arrays), artifacts can be introduced. Here check whether such values
- # are present and remove them.
- if not isinstance(polys, np.ndarray) or not np.isfinite(polys).all():
- new_polys = []
- new_colset = []
-
- # Depending on fcolors, colset is either an empty list or has as
- # many elements as polys. In the former case new_colset results in
- # a list with None entries, which is discarded later.
- for p, col in itertools.zip_longest(polys, colset):
- new_poly = np.array(p)[np.isfinite(p).all(axis=1)]
- if len(new_poly):
- new_polys.append(new_poly)
- new_colset.append(col)
-
- # Replace previous polys and, if fcolors is not None, colset
- polys = new_polys
- if fcolors is not None:
- colset = new_colset
-
- # note that the striding causes some polygons to have more coordinates
- # than others
-
- if fcolors is not None:
- polyc = art3d.Poly3DCollection(
- polys, edgecolors=colset, facecolors=colset, shade=shade,
- lightsource=lightsource, **kwargs)
- elif cmap:
- polyc = art3d.Poly3DCollection(polys, **kwargs)
- # can't always vectorize, because polys might be jagged
- if isinstance(polys, np.ndarray):
- avg_z = polys[..., 2].mean(axis=-1)
- else:
- avg_z = np.array([ps[:, 2].mean() for ps in polys])
- polyc.set_array(avg_z)
- if vmin is not None or vmax is not None:
- polyc.set_clim(vmin, vmax)
- if norm is not None:
- polyc.set_norm(norm)
- else:
- color = kwargs.pop('color', None)
- if color is None:
- color = self._get_lines.get_next_color()
- color = np.array(mcolors.to_rgba(color))
-
- polyc = art3d.Poly3DCollection(
- polys, facecolors=color, shade=shade,
- lightsource=lightsource, **kwargs)
-
- self.add_collection(polyc)
- self.auto_scale_xyz(X, Y, Z, had_data)
-
- return polyc
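-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module), assuming `ax` is a 3D Axes and `np` is numpy:
- #
- #     x = y = np.linspace(-2, 2, 80)
- #     X, Y = np.meshgrid(x, y)
- #     Z = np.exp(-(X**2 + Y**2))
- #     ax.plot_surface(X, Y, Z, cmap='viridis', rcount=40, ccount=40)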
-
- def plot_wireframe(self, X, Y, Z, **kwargs):
- """
- Plot a 3D wireframe.
-
- .. note::
-
- The *rcount* and *ccount* kwargs, which both default to 50,
- determine the maximum number of samples used in each direction. If
- the input data is larger, it will be downsampled (by slicing) to
- these numbers of points.
-
- Parameters
- ----------
- X, Y, Z : 2D arrays
- Data values.
-
- rcount, ccount : int
- Maximum number of samples used in each direction. If the input
- data is larger, it will be downsampled (by slicing) to these
- numbers of points. Setting a count to zero causes the data not to
- be sampled in the corresponding direction, producing a 3D line
- plot rather than a wireframe plot. Defaults to 50.
-
- rstride, cstride : int
- Downsampling stride in each direction. These arguments are
- mutually exclusive with *rcount* and *ccount*. If only one of
- *rstride* or *cstride* is set, the other defaults to 1. Setting a
- stride to zero causes the data not to be sampled in the
- corresponding direction, producing a 3D line plot rather than a
- wireframe plot.
-
- 'classic' mode uses a default of ``rstride = cstride = 1`` instead
- of the new default of ``rcount = ccount = 50``.
-
- **kwargs
- Other keyword arguments are forwarded to `.Line3DCollection`.
- """
-
- had_data = self.has_data()
- if Z.ndim != 2:
- raise ValueError("Argument Z must be 2-dimensional.")
- # FIXME: Support masked arrays
- X, Y, Z = np.broadcast_arrays(X, Y, Z)
- rows, cols = Z.shape
-
- has_stride = 'rstride' in kwargs or 'cstride' in kwargs
- has_count = 'rcount' in kwargs or 'ccount' in kwargs
-
- if has_stride and has_count:
- raise ValueError("Cannot specify both stride and count arguments")
-
- rstride = kwargs.pop('rstride', 1)
- cstride = kwargs.pop('cstride', 1)
- rcount = kwargs.pop('rcount', 50)
- ccount = kwargs.pop('ccount', 50)
-
- if mpl.rcParams['_internal.classic_mode']:
- # Strides have priority over counts in classic mode.
- # So, only compute strides from counts
- # if counts were explicitly given
- if has_count:
- rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
- cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
- else:
- # If the strides are provided then it has priority.
- # Otherwise, compute the strides from the counts.
- if not has_stride:
- rstride = int(max(np.ceil(rows / rcount), 1)) if rcount else 0
- cstride = int(max(np.ceil(cols / ccount), 1)) if ccount else 0
-
- # We want two sets of lines, one running along the "rows" of
- # Z and another set of lines running along the "columns" of Z.
- # This transpose will make it easy to obtain the columns.
- tX, tY, tZ = np.transpose(X), np.transpose(Y), np.transpose(Z)
-
- if rstride:
- rii = list(range(0, rows, rstride))
- # Add the last index only if needed
- if rows > 0 and rii[-1] != (rows - 1):
- rii += [rows-1]
- else:
- rii = []
- if cstride:
- cii = list(range(0, cols, cstride))
- # Add the last index only if needed
- if cols > 0 and cii[-1] != (cols - 1):
- cii += [cols-1]
- else:
- cii = []
-
- if rstride == 0 and cstride == 0:
- raise ValueError("Either rstride or cstride must be non zero")
-
- # If the inputs were empty, then just
- # reset everything.
- if Z.size == 0:
- rii = []
- cii = []
-
- xlines = [X[i] for i in rii]
- ylines = [Y[i] for i in rii]
- zlines = [Z[i] for i in rii]
-
- txlines = [tX[i] for i in cii]
- tylines = [tY[i] for i in cii]
- tzlines = [tZ[i] for i in cii]
-
- lines = ([list(zip(xl, yl, zl))
- for xl, yl, zl in zip(xlines, ylines, zlines)]
- + [list(zip(xl, yl, zl))
- for xl, yl, zl in zip(txlines, tylines, tzlines)])
-
- linec = art3d.Line3DCollection(lines, **kwargs)
- self.add_collection(linec)
- self.auto_scale_xyz(X, Y, Z, had_data)
-
- return linec
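-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module), reusing the meshgrid-style X, Y, Z from the sketch above;
- # `ax` is an assumed 3D Axes:
- #
- #     ax.plot_wireframe(X, Y, Z, rcount=20, ccount=20, color='gray')
- #     ax.plot_wireframe(X, Y, Z, rstride=5, cstride=0)  # row lines only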
-
- def plot_trisurf(self, *args, color=None, norm=None, vmin=None, vmax=None,
- lightsource=None, **kwargs):
- """
- Plot a triangulated surface.
-
- The (optional) triangulation can be specified in one of two ways;
- either::
-
- plot_trisurf(triangulation, ...)
-
- where triangulation is a `~matplotlib.tri.Triangulation` object, or::
-
- plot_trisurf(X, Y, ...)
- plot_trisurf(X, Y, triangles, ...)
- plot_trisurf(X, Y, triangles=triangles, ...)
-
- in which case a Triangulation object will be created. See
- `.Triangulation` for an explanation of these possibilities.
-
- The remaining arguments are::
-
- plot_trisurf(..., Z)
-
- where *Z* is the array of values to contour, one per point
- in the triangulation.
-
- Parameters
- ----------
- X, Y, Z : array-like
- Data values as 1D arrays.
- color
- Color of the surface patches.
- cmap
- A colormap for the surface patches.
- norm : Normalize
- An instance of Normalize to map values to colors.
- vmin, vmax : float, default: None
- Minimum and maximum value to map.
- shade : bool, default: True
- Whether to shade the facecolors. Shading is always disabled when
- *cmap* is specified.
- lightsource : `~matplotlib.colors.LightSource`
- The lightsource to use when *shade* is True.
- **kwargs
- All other keyword arguments are passed on to
- :class:`~mpl_toolkits.mplot3d.art3d.Poly3DCollection`
-
- Examples
- --------
- .. plot:: gallery/mplot3d/trisurf3d.py
- .. plot:: gallery/mplot3d/trisurf3d_2.py
- """
-
- had_data = self.has_data()
-
- # TODO: Support custom face colours
- if color is None:
- color = self._get_lines.get_next_color()
- color = np.array(mcolors.to_rgba(color))
-
- cmap = kwargs.get('cmap', None)
- shade = kwargs.pop('shade', cmap is None)
-
- tri, args, kwargs = \
- Triangulation.get_from_args_and_kwargs(*args, **kwargs)
- try:
- z = kwargs.pop('Z')
- except KeyError:
- # We do this so Z doesn't get passed as an arg to PolyCollection
- z, *args = args
- z = np.asarray(z)
-
- triangles = tri.get_masked_triangles()
- xt = tri.x[triangles]
- yt = tri.y[triangles]
- zt = z[triangles]
- verts = np.stack((xt, yt, zt), axis=-1)
-
- if cmap:
- polyc = art3d.Poly3DCollection(verts, *args, **kwargs)
- # average over the three points of each triangle
- avg_z = verts[:, :, 2].mean(axis=1)
- polyc.set_array(avg_z)
- if vmin is not None or vmax is not None:
- polyc.set_clim(vmin, vmax)
- if norm is not None:
- polyc.set_norm(norm)
- else:
- polyc = art3d.Poly3DCollection(
- verts, *args, shade=shade, lightsource=lightsource,
- facecolors=color, **kwargs)
-
- self.add_collection(polyc)
- self.auto_scale_xyz(tri.x, tri.y, z, had_data)
-
- return polyc
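-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module): plot_trisurf works from scattered 1D points, triangulating in
- # the x-y plane; `ax` is an assumed 3D Axes and `np` is numpy:
- #
- #     rng = np.random.default_rng(0)
- #     x, y = rng.uniform(-2, 2, (2, 400))
- #     z = np.exp(-(x**2 + y**2))
- #     ax.plot_trisurf(x, y, z, cmap='viridis')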
-
- def _3d_extend_contour(self, cset, stride=5):
- """
- Extend a contour in 3D by creating side walls that extrude each
- contour line half a level spacing below and above its level.
- """
-
- dz = (cset.levels[1] - cset.levels[0]) / 2
- polyverts = []
- colors = []
- for idx, level in enumerate(cset.levels):
- path = cset.get_paths()[idx]
- subpaths = [*path._iter_connected_components()]
- color = cset.get_edgecolor()[idx]
- top = art3d._paths_to_3d_segments(subpaths, level - dz)
- bot = art3d._paths_to_3d_segments(subpaths, level + dz)
- if not len(top[0]):
- continue
- nsteps = max(round(len(top[0]) / stride), 2)
- stepsize = (len(top[0]) - 1) / (nsteps - 1)
- polyverts.extend([
- (top[0][round(i * stepsize)], top[0][round((i + 1) * stepsize)],
- bot[0][round((i + 1) * stepsize)], bot[0][round(i * stepsize)])
- for i in range(round(nsteps) - 1)])
- colors.extend([color] * (round(nsteps) - 1))
- self.add_collection3d(art3d.Poly3DCollection(
- np.array(polyverts), # All polygons have 4 vertices, so vectorize.
- facecolors=colors, edgecolors=colors, shade=True))
- cset.remove()
-
- def add_contour_set(
- self, cset, extend3d=False, stride=5, zdir='z', offset=None):
- zdir = '-' + zdir
- if extend3d:
- self._3d_extend_contour(cset, stride)
- else:
- art3d.collection_2d_to_3d(
- cset, zs=offset if offset is not None else cset.levels, zdir=zdir)
-
- def add_contourf_set(self, cset, zdir='z', offset=None):
- self._add_contourf_set(cset, zdir=zdir, offset=offset)
-
- def _add_contourf_set(self, cset, zdir='z', offset=None):
- """
- Returns
- -------
- levels : `numpy.ndarray`
- Levels at which the filled contours are added.
- """
- zdir = '-' + zdir
-
- midpoints = cset.levels[:-1] + np.diff(cset.levels) / 2
- # Linearly interpolate to get levels for any extensions
- if cset._extend_min:
- min_level = cset.levels[0] - np.diff(cset.levels[:2]) / 2
- midpoints = np.insert(midpoints, 0, min_level)
- if cset._extend_max:
- max_level = cset.levels[-1] + np.diff(cset.levels[-2:]) / 2
- midpoints = np.append(midpoints, max_level)
-
- art3d.collection_2d_to_3d(
- cset, zs=offset if offset is not None else midpoints, zdir=zdir)
- return midpoints
-
- @_preprocess_data()
- def contour(self, X, Y, Z, *args,
- extend3d=False, stride=5, zdir='z', offset=None, **kwargs):
- """
- Create a 3D contour plot.
-
- Parameters
- ----------
- X, Y, Z : array-like
- Input data. See `.Axes.contour` for supported data shapes.
- extend3d : bool, default: False
- Whether to extend contour in 3D.
- stride : int
- Step size for extending contour.
- zdir : {'x', 'y', 'z'}, default: 'z'
- The direction to use.
- offset : float, optional
- If specified, plot a projection of the contour lines at this
- position in a plane normal to *zdir*.
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
-
- *args, **kwargs
- Other arguments are forwarded to `matplotlib.axes.Axes.contour`.
-
- Returns
- -------
- matplotlib.contour.QuadContourSet
- """
- had_data = self.has_data()
-
- jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
- cset = super().contour(jX, jY, jZ, *args, **kwargs)
- self.add_contour_set(cset, extend3d, stride, zdir, offset)
-
- self.auto_scale_xyz(X, Y, Z, had_data)
- return cset
-
- contour3D = contour
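-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module), assuming meshgrid-style X, Y, Z and a 3D Axes `ax`:
- #
- #     ax.contour(X, Y, Z, extend3d=True, stride=8)      # extruded contour bands
- #     ax.contour(X, Y, Z, zdir='z', offset=Z.min())     # flat projection on a pane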
-
- @_preprocess_data()
- def tricontour(self, *args,
- extend3d=False, stride=5, zdir='z', offset=None, **kwargs):
- """
- Create a 3D contour plot.
-
- .. note::
- This method currently produces incorrect output due to a
- longstanding bug in 3D PolyCollection rendering.
-
- Parameters
- ----------
- X, Y, Z : array-like
- Input data. See `.Axes.tricontour` for supported data shapes.
- extend3d : bool, default: False
- Whether to extend contour in 3D.
- stride : int
- Step size for extending contour.
- zdir : {'x', 'y', 'z'}, default: 'z'
- The direction to use.
- offset : float, optional
- If specified, plot a projection of the contour lines at this
- position in a plane normal to *zdir*.
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
- *args, **kwargs
- Other arguments are forwarded to `matplotlib.axes.Axes.tricontour`.
-
- Returns
- -------
- matplotlib.tri._tricontour.TriContourSet
- """
- had_data = self.has_data()
-
- tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
- *args, **kwargs)
- X = tri.x
- Y = tri.y
- if 'Z' in kwargs:
- Z = kwargs.pop('Z')
- else:
- # We do this so Z doesn't get passed as an arg to Axes.tricontour
- Z, *args = args
-
- jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
- tri = Triangulation(jX, jY, tri.triangles, tri.mask)
-
- cset = super().tricontour(tri, jZ, *args, **kwargs)
- self.add_contour_set(cset, extend3d, stride, zdir, offset)
-
- self.auto_scale_xyz(X, Y, Z, had_data)
- return cset
-
- def _auto_scale_contourf(self, X, Y, Z, zdir, levels, had_data):
- # Autoscale in the zdir based on the levels added, which are
- # different from data range if any contour extensions are present
- dim_vals = {'x': X, 'y': Y, 'z': Z, zdir: levels}
- # Input data and levels have different sizes, but auto_scale_xyz
- # expects same-size input, so manually take min/max limits
- limits = [(np.nanmin(dim_vals[dim]), np.nanmax(dim_vals[dim]))
- for dim in ['x', 'y', 'z']]
- self.auto_scale_xyz(*limits, had_data)
-
- @_preprocess_data()
- def contourf(self, X, Y, Z, *args, zdir='z', offset=None, **kwargs):
- """
- Create a 3D filled contour plot.
-
- Parameters
- ----------
- X, Y, Z : array-like
- Input data. See `.Axes.contourf` for supported data shapes.
- zdir : {'x', 'y', 'z'}, default: 'z'
- The direction to use.
- offset : float, optional
- If specified, plot a projection of the contour lines at this
- position in a plane normal to *zdir*.
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
- *args, **kwargs
- Other arguments are forwarded to `matplotlib.axes.Axes.contourf`.
-
- Returns
- -------
- matplotlib.contour.QuadContourSet
- """
- had_data = self.has_data()
-
- jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
- cset = super().contourf(jX, jY, jZ, *args, **kwargs)
- levels = self._add_contourf_set(cset, zdir, offset)
-
- self._auto_scale_contourf(X, Y, Z, zdir, levels, had_data)
- return cset
-
- contourf3D = contourf
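-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module): filled contours can be projected onto the axis panes to act as
- # "shadows" of a surface; X, Y, Z and `ax` are assumed as above:
- #
- #     ax.contourf(X, Y, Z, zdir='z', offset=Z.min())    # floor projection
- #     ax.contourf(X, Y, Z, zdir='x', offset=X.min())    # back-wall projection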
-
- @_preprocess_data()
- def tricontourf(self, *args, zdir='z', offset=None, **kwargs):
- """
- Create a 3D filled contour plot.
-
- .. note::
- This method currently produces incorrect output due to a
- longstanding bug in 3D PolyCollection rendering.
-
- Parameters
- ----------
- X, Y, Z : array-like
- Input data. See `.Axes.tricontourf` for supported data shapes.
- zdir : {'x', 'y', 'z'}, default: 'z'
- The direction to use.
- offset : float, optional
- If specified, plot a projection of the contour lines at this
- position in a plane normal to zdir.
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
- *args, **kwargs
- Other arguments are forwarded to
- `matplotlib.axes.Axes.tricontourf`.
-
- Returns
- -------
- matplotlib.tri._tricontour.TriContourSet
- """
- had_data = self.has_data()
-
- tri, args, kwargs = Triangulation.get_from_args_and_kwargs(
- *args, **kwargs)
- X = tri.x
- Y = tri.y
- if 'Z' in kwargs:
- Z = kwargs.pop('Z')
- else:
- # We do this so Z doesn't get passed as an arg to Axes.tricontourf
- Z, *args = args
-
- jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir)
- tri = Triangulation(jX, jY, tri.triangles, tri.mask)
-
- cset = super().tricontourf(tri, jZ, *args, **kwargs)
- levels = self._add_contourf_set(cset, zdir, offset)
-
- self._auto_scale_contourf(X, Y, Z, zdir, levels, had_data)
- return cset
-
- def add_collection3d(self, col, zs=0, zdir='z'):
- """
- Add a 3D collection object to the plot.
-
- 2D collection types are converted to a 3D version by
- modifying the object and adding z coordinate information.
-
- Supported are:
-
- - PolyCollection
- - LineCollection
- - PatchCollection
- """
- zvals = np.atleast_1d(zs)
- zsortval = (np.min(zvals) if zvals.size
- else 0) # FIXME: arbitrary default
-
- # FIXME: use issubclass() (although, then a 3D collection
- # object would also pass.) Maybe have a collection3d
- # abstract class to test for and exclude?
- if type(col) is mcoll.PolyCollection:
- art3d.poly_collection_2d_to_3d(col, zs=zs, zdir=zdir)
- col.set_sort_zpos(zsortval)
- elif type(col) is mcoll.LineCollection:
- art3d.line_collection_2d_to_3d(col, zs=zs, zdir=zdir)
- col.set_sort_zpos(zsortval)
- elif type(col) is mcoll.PatchCollection:
- art3d.patch_collection_2d_to_3d(col, zs=zs, zdir=zdir)
- col.set_sort_zpos(zsortval)
-
- collection = super().add_collection(col)
- return collection
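-
- # Illustrative usage sketch (editor's addition, not from the upstream
- # module): a 2D PolyCollection can be lifted into 3D by placing each
- # polygon at a z position along a chosen direction; `ax` is an assumed
- # 3D Axes:
- #
- #     from matplotlib.collections import PolyCollection
- #     verts = [[(0, 0), (1, 0), (1, 1), (0, 1)],       # one quad per z slot
- #              [(0, 0), (2, 0), (2, 2), (0, 2)]]
- #     poly = PolyCollection(verts, facecolors=['C0', 'C1'], alpha=0.6)
- #     ax.add_collection3d(poly, zs=[0, 1], zdir='y')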
-
- @_preprocess_data(replace_names=["xs", "ys", "zs", "s",
- "edgecolors", "c", "facecolor",
- "facecolors", "color"])
- def scatter(self, xs, ys, zs=0, zdir='z', s=20, c=None, depthshade=True,
- *args, **kwargs):
- """
- Create a scatter plot.
-
- Parameters
- ----------
- xs, ys : array-like
- The data positions.
- zs : float or array-like, default: 0
- The z-positions. Either an array of the same length as *xs* and
- *ys* or a single value to place all points in the same plane.
- zdir : {'x', 'y', 'z', '-x', '-y', '-z'}, default: 'z'
- The axis direction for the *zs*. This is useful when plotting 2D
- data on a 3D Axes. The data must be passed as *xs*, *ys*. Setting
- *zdir* to 'y' then plots the data to the x-z-plane.
-
- See also :doc:`/gallery/mplot3d/2dcollections3d`.
-
- s : float or array-like, default: 20
- The marker size in points**2. Either an array of the same length
- as *xs* and *ys* or a single value to make all markers the same
- size.
- c : color, sequence, or sequence of colors, optional
- The marker color. Possible values:
-
- - A single color format string.
- - A sequence of colors of length n.
- - A sequence of n numbers to be mapped to colors using *cmap* and
- *norm*.
- - A 2D array in which the rows are RGB or RGBA.
-
- For more details see the *c* argument of `~.axes.Axes.scatter`.
- depthshade : bool, default: True
- Whether to shade the scatter markers to give the appearance of
- depth. Each call to ``scatter()`` will perform its depthshading
- independently.
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
- **kwargs
- All other keyword arguments are passed on to `~.axes.Axes.scatter`.
-
- Returns
- -------
- paths : `~matplotlib.collections.PathCollection`
- """
-
- had_data = self.has_data()
- zs_orig = zs
-
- xs, ys, zs = np.broadcast_arrays(
- *[np.ravel(np.ma.filled(t, np.nan)) for t in [xs, ys, zs]])
- s = np.ma.ravel(s) # This doesn't have to match x, y in size.
-
- xs, ys, zs, s, c, color = cbook.delete_masked_points(
- xs, ys, zs, s, c, kwargs.get('color', None)
- )
- if kwargs.get("color") is not None:
- kwargs['color'] = color
-
- # For xs and ys, 2D scatter() will do the copying.
- if np.may_share_memory(zs_orig, zs): # Avoid unnecessary copies.
- zs = zs.copy()
-
- patches = super().scatter(xs, ys, s=s, c=c, *args, **kwargs)
- art3d.patch_collection_2d_to_3d(patches, zs=zs, zdir=zdir,
- depthshade=depthshade)
-
- if self._zmargin < 0.05 and xs.size > 0:
- self.set_zmargin(0.05)
-
- self.auto_scale_xyz(xs, ys, zs, had_data)
-
- return patches
-
- scatter3D = scatter
-
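A minimal sketch of the 3D scatter call (random data for illustration):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
xs, ys, zs = rng.standard_normal((3, 100))

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Color by z-value; depthshade=True (the default) dims far-away markers.
ax.scatter(xs, ys, zs, c=zs, s=30, depthshade=True)
plt.show()
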
- @_preprocess_data()
- def bar(self, left, height, zs=0, zdir='z', *args, **kwargs):
- """
- Add 2D bar(s).
-
- Parameters
- ----------
- left : 1D array-like
- The x coordinates of the left sides of the bars.
- height : 1D array-like
- The height of the bars.
- zs : float or 1D array-like
- Z coordinate of bars; if a single value is specified, it will be
- used for all bars.
- zdir : {'x', 'y', 'z'}, default: 'z'
- When plotting 2D data, the direction to use as z ('x', 'y' or 'z').
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
- **kwargs
- Other keyword arguments are forwarded to
- `matplotlib.axes.Axes.bar`.
-
- Returns
- -------
- mpl_toolkits.mplot3d.art3d.Patch3DCollection
- """
- had_data = self.has_data()
-
- patches = super().bar(left, height, *args, **kwargs)
-
- zs = np.broadcast_to(zs, len(left))
-
- verts = []
- verts_zs = []
- for p, z in zip(patches, zs):
- vs = art3d._get_patch_verts(p)
- verts += vs.tolist()
- verts_zs += [z] * len(vs)
- art3d.patch_2d_to_3d(p, z, zdir)
- if 'alpha' in kwargs:
- p.set_alpha(kwargs['alpha'])
-
- if len(verts) > 0:
- # the following has to be skipped if verts is empty
- # NOTE: Bugs could still occur if len(verts) > 0,
- # but the "2nd dimension" is empty.
- xs, ys = zip(*verts)
- else:
- xs, ys = [], []
-
- xs, ys, verts_zs = art3d.juggle_axes(xs, ys, verts_zs, zdir)
- self.auto_scale_xyz(xs, ys, verts_zs, had_data)
-
- return patches
-
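A sketch in the spirit of the classic bars3d demo, with invented heights; each call draws a 2D bar chart and places it in a different y plane:

import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
rng = np.random.default_rng(0)
xs = np.arange(5)
for z in [0, 1, 2]:
    heights = rng.uniform(0.2, 1.0, size=5)
    # zdir='y' lifts the 2D bars into the plane y = z.
    ax.bar(xs, heights, zs=z, zdir='y', alpha=0.8)
plt.show()
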
- @_preprocess_data()
- def bar3d(self, x, y, z, dx, dy, dz, color=None,
- zsort='average', shade=True, lightsource=None, *args, **kwargs):
- """
- Generate a 3D barplot.
-
-        This method creates a three-dimensional bar plot where the width,
-        depth, height, and color of the bars can all be uniquely set.
-
- Parameters
- ----------
- x, y, z : array-like
- The coordinates of the anchor point of the bars.
-
- dx, dy, dz : float or array-like
- The width, depth, and height of the bars, respectively.
-
- color : sequence of colors, optional
- The color of the bars can be specified globally or
- individually. This parameter can be:
-
- - A single color, to color all bars the same color.
- - An array of colors of length N bars, to color each bar
- independently.
- - An array of colors of length 6, to color the faces of the
- bars similarly.
- - An array of colors of length 6 * N bars, to color each face
- independently.
-
- When coloring the faces of the boxes specifically, this is
- the order of the coloring:
-
- 1. -Z (bottom of box)
- 2. +Z (top of box)
- 3. -Y
- 4. +Y
- 5. -X
- 6. +X
-
- zsort : str, optional
-            The z-axis sorting scheme passed on to `~.art3d.Poly3DCollection`.
-
- shade : bool, default: True
- When true, this shades the dark sides of the bars (relative
- to the plot's source of light).
-
- lightsource : `~matplotlib.colors.LightSource`
- The lightsource to use when *shade* is True.
-
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
-
- **kwargs
- Any additional keyword arguments are passed onto
- `~.art3d.Poly3DCollection`.
-
- Returns
- -------
- collection : `~.art3d.Poly3DCollection`
- A collection of three-dimensional polygons representing the bars.
- """
-
- had_data = self.has_data()
-
- x, y, z, dx, dy, dz = np.broadcast_arrays(
- np.atleast_1d(x), y, z, dx, dy, dz)
- minx = np.min(x)
- maxx = np.max(x + dx)
- miny = np.min(y)
- maxy = np.max(y + dy)
- minz = np.min(z)
- maxz = np.max(z + dz)
-
- # shape (6, 4, 3)
- # All faces are oriented facing outwards - when viewed from the
- # outside, their vertices are in a counterclockwise ordering.
- cuboid = np.array([
- # -z
- (
- (0, 0, 0),
- (0, 1, 0),
- (1, 1, 0),
- (1, 0, 0),
- ),
- # +z
- (
- (0, 0, 1),
- (1, 0, 1),
- (1, 1, 1),
- (0, 1, 1),
- ),
- # -y
- (
- (0, 0, 0),
- (1, 0, 0),
- (1, 0, 1),
- (0, 0, 1),
- ),
- # +y
- (
- (0, 1, 0),
- (0, 1, 1),
- (1, 1, 1),
- (1, 1, 0),
- ),
- # -x
- (
- (0, 0, 0),
- (0, 0, 1),
- (0, 1, 1),
- (0, 1, 0),
- ),
- # +x
- (
- (1, 0, 0),
- (1, 1, 0),
- (1, 1, 1),
- (1, 0, 1),
- ),
- ])
-
- # indexed by [bar, face, vertex, coord]
- polys = np.empty(x.shape + cuboid.shape)
-
- # handle each coordinate separately
- for i, p, dp in [(0, x, dx), (1, y, dy), (2, z, dz)]:
- p = p[..., np.newaxis, np.newaxis]
- dp = dp[..., np.newaxis, np.newaxis]
- polys[..., i] = p + dp * cuboid[..., i]
-
- # collapse the first two axes
- polys = polys.reshape((-1,) + polys.shape[2:])
-
- facecolors = []
- if color is None:
- color = [self._get_patches_for_fill.get_next_color()]
-
- color = list(mcolors.to_rgba_array(color))
-
- if len(color) == len(x):
- # bar colors specified, need to expand to number of faces
- for c in color:
- facecolors.extend([c] * 6)
- else:
- # a single color specified, or face colors specified explicitly
- facecolors = color
- if len(facecolors) < len(x):
- facecolors *= (6 * len(x))
-
- col = art3d.Poly3DCollection(polys,
- zsort=zsort,
- facecolors=facecolors,
- shade=shade,
- lightsource=lightsource,
- *args, **kwargs)
- self.add_collection(col)
-
- self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
-
- return col
-
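A sketch of the 3D bar call with one bar per grid cell (the heights are invented):

import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
_x, _y = np.meshgrid(np.arange(4), np.arange(3))
x, y = _x.ravel(), _y.ravel()
dz = x + y + 1          # bar heights
z = np.zeros_like(dz)   # bars start at z = 0
# dx/dy set each bar's footprint; shade=True darkens the far faces.
ax.bar3d(x, y, z, dx=0.8, dy=0.8, dz=dz, shade=True)
plt.show()
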
- def set_title(self, label, fontdict=None, loc='center', **kwargs):
- # docstring inherited
- ret = super().set_title(label, fontdict=fontdict, loc=loc, **kwargs)
- (x, y) = self.title.get_position()
- self.title.set_y(0.92 * y)
- return ret
-
- @_preprocess_data()
- def quiver(self, X, Y, Z, U, V, W, *,
- length=1, arrow_length_ratio=.3, pivot='tail', normalize=False,
- **kwargs):
- """
- Plot a 3D field of arrows.
-
- The arguments can be array-like or scalars, so long as they can be
- broadcast together. The arguments can also be masked arrays. If an
-        element in any of the arguments is masked, then the corresponding
-        quiver element will not be plotted.
-
- Parameters
- ----------
- X, Y, Z : array-like
- The x, y and z coordinates of the arrow locations (default is
- tail of arrow; see *pivot* kwarg).
-
- U, V, W : array-like
- The x, y and z components of the arrow vectors.
-
- length : float, default: 1
- The length of each quiver.
-
- arrow_length_ratio : float, default: 0.3
- The ratio of the arrow head with respect to the quiver.
-
- pivot : {'tail', 'middle', 'tip'}, default: 'tail'
- The part of the arrow that is at the grid point; the arrow
- rotates about this point, hence the name *pivot*.
-
- normalize : bool, default: False
- Whether all arrows are normalized to have the same length, or keep
-            the lengths defined by *U*, *V*, and *W*.
-
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
-
- **kwargs
- Any additional keyword arguments are delegated to
- :class:`.Line3DCollection`
- """
-
- def calc_arrows(UVW):
- # get unit direction vector perpendicular to (u, v, w)
- x = UVW[:, 0]
- y = UVW[:, 1]
- norm = np.linalg.norm(UVW[:, :2], axis=1)
- x_p = np.divide(y, norm, where=norm != 0, out=np.zeros_like(x))
- y_p = np.divide(-x, norm, where=norm != 0, out=np.ones_like(x))
- # compute the two arrowhead direction unit vectors
- rangle = math.radians(15)
- c = math.cos(rangle)
- s = math.sin(rangle)
- # construct the rotation matrices of shape (3, 3, n)
- r13 = y_p * s
- r32 = x_p * s
- r12 = x_p * y_p * (1 - c)
- Rpos = np.array(
- [[c + (x_p ** 2) * (1 - c), r12, r13],
- [r12, c + (y_p ** 2) * (1 - c), -r32],
- [-r13, r32, np.full_like(x_p, c)]])
- # opposite rotation negates all the sin terms
- Rneg = Rpos.copy()
- Rneg[[0, 1, 2, 2], [2, 2, 0, 1]] *= -1
- # Batch n (3, 3) x (3) matrix multiplications ((3, 3, n) x (n, 3)).
- Rpos_vecs = np.einsum("ij...,...j->...i", Rpos, UVW)
- Rneg_vecs = np.einsum("ij...,...j->...i", Rneg, UVW)
- # Stack into (n, 2, 3) result.
- return np.stack([Rpos_vecs, Rneg_vecs], axis=1)
-
- had_data = self.has_data()
-
- input_args = [X, Y, Z, U, V, W]
-
- # extract the masks, if any
- masks = [k.mask for k in input_args
- if isinstance(k, np.ma.MaskedArray)]
- # broadcast to match the shape
- bcast = np.broadcast_arrays(*input_args, *masks)
- input_args = bcast[:6]
- masks = bcast[6:]
- if masks:
- # combine the masks into one
- mask = functools.reduce(np.logical_or, masks)
- # put mask on and compress
- input_args = [np.ma.array(k, mask=mask).compressed()
- for k in input_args]
- else:
- input_args = [np.ravel(k) for k in input_args]
-
- if any(len(v) == 0 for v in input_args):
- # No quivers, so just make an empty collection and return early
- linec = art3d.Line3DCollection([], **kwargs)
- self.add_collection(linec)
- return linec
-
- shaft_dt = np.array([0., length], dtype=float)
- arrow_dt = shaft_dt * arrow_length_ratio
-
- _api.check_in_list(['tail', 'middle', 'tip'], pivot=pivot)
- if pivot == 'tail':
- shaft_dt -= length
- elif pivot == 'middle':
- shaft_dt -= length / 2
-
- XYZ = np.column_stack(input_args[:3])
- UVW = np.column_stack(input_args[3:]).astype(float)
-
- # Normalize rows of UVW
- norm = np.linalg.norm(UVW, axis=1)
-
- # If any row of UVW is all zeros, don't make a quiver for it
- mask = norm > 0
- XYZ = XYZ[mask]
- if normalize:
- UVW = UVW[mask] / norm[mask].reshape((-1, 1))
- else:
- UVW = UVW[mask]
-
- if len(XYZ) > 0:
- # compute the shaft lines all at once with an outer product
- shafts = (XYZ - np.multiply.outer(shaft_dt, UVW)).swapaxes(0, 1)
- # compute head direction vectors, n heads x 2 sides x 3 dimensions
- head_dirs = calc_arrows(UVW)
- # compute all head lines at once, starting from the shaft ends
- heads = shafts[:, :1] - np.multiply.outer(arrow_dt, head_dirs)
- # stack left and right head lines together
- heads = heads.reshape((len(arrow_dt), -1, 3))
- # transpose to get a list of lines
- heads = heads.swapaxes(0, 1)
-
- lines = [*shafts, *heads]
- else:
- lines = []
-
- linec = art3d.Line3DCollection(lines, **kwargs)
- self.add_collection(linec)
-
- self.auto_scale_xyz(XYZ[:, 0], XYZ[:, 1], XYZ[:, 2], had_data)
-
- return linec
-
- quiver3D = quiver
-
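A sketch of the quiver call on a small grid (the vector field is invented):

import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
x, y, z = np.meshgrid(np.arange(0, 1, 0.25),
                      np.arange(0, 1, 0.25),
                      np.arange(0, 1, 0.25))
u, v, w = np.sin(np.pi * x), np.cos(np.pi * y), np.ones_like(z)
# *length* scales every arrow; normalize=True gives them equal length.
ax.quiver(x, y, z, u, v, w, length=0.1, normalize=True)
plt.show()
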
- def voxels(self, *args, facecolors=None, edgecolors=None, shade=True,
- lightsource=None, **kwargs):
- """
- ax.voxels([x, y, z,] /, filled, facecolors=None, edgecolors=None, \
-**kwargs)
-
-        Plot a set of filled voxels.
-
- All voxels are plotted as 1x1x1 cubes on the axis, with
- ``filled[0, 0, 0]`` placed with its lower corner at the origin.
- Occluded faces are not plotted.
-
- Parameters
- ----------
- filled : 3D np.array of bool
- A 3D array of values, with truthy values indicating which voxels
- to fill
-
- x, y, z : 3D np.array, optional
- The coordinates of the corners of the voxels. This should broadcast
- to a shape one larger in every dimension than the shape of
- *filled*. These can be used to plot non-cubic voxels.
-
- If not specified, defaults to increasing integers along each axis,
- like those returned by :func:`~numpy.indices`.
- As indicated by the ``/`` in the function signature, these
- arguments can only be passed positionally.
-
- facecolors, edgecolors : array-like, optional
- The color to draw the faces and edges of the voxels. Can only be
- passed as keyword arguments.
- These parameters can be:
-
- - A single color value, to color all voxels the same color. This
- can be either a string, or a 1D RGB/RGBA array
- - ``None``, the default, to use a single color for the faces, and
- the style default for the edges.
- - A 3D `~numpy.ndarray` of color names, with each item the color
- for the corresponding voxel. The size must match the voxels.
- - A 4D `~numpy.ndarray` of RGB/RGBA data, with the components
- along the last axis.
-
- shade : bool, default: True
- Whether to shade the facecolors.
-
- lightsource : `~matplotlib.colors.LightSource`
- The lightsource to use when *shade* is True.
-
- **kwargs
- Additional keyword arguments to pass onto
- `~mpl_toolkits.mplot3d.art3d.Poly3DCollection`.
-
- Returns
- -------
- faces : dict
- A dictionary indexed by coordinate, where ``faces[i, j, k]`` is a
- `.Poly3DCollection` of the faces drawn for the voxel
- ``filled[i, j, k]``. If no faces were drawn for a given voxel,
- either because it was not asked to be drawn, or it is fully
- occluded, then ``(i, j, k) not in faces``.
-
- Examples
- --------
- .. plot:: gallery/mplot3d/voxels.py
- .. plot:: gallery/mplot3d/voxels_rgb.py
- .. plot:: gallery/mplot3d/voxels_torus.py
- .. plot:: gallery/mplot3d/voxels_numpy_logo.py
- """
-
- # work out which signature we should be using, and use it to parse
- # the arguments. Name must be voxels for the correct error message
- if len(args) >= 3:
- # underscores indicate position only
- def voxels(__x, __y, __z, filled, **kwargs):
- return (__x, __y, __z), filled, kwargs
- else:
- def voxels(filled, **kwargs):
- return None, filled, kwargs
-
- xyz, filled, kwargs = voxels(*args, **kwargs)
-
- # check dimensions
- if filled.ndim != 3:
- raise ValueError("Argument filled must be 3-dimensional")
- size = np.array(filled.shape, dtype=np.intp)
-
- # check xyz coordinates, which are one larger than the filled shape
- coord_shape = tuple(size + 1)
- if xyz is None:
- x, y, z = np.indices(coord_shape)
- else:
- x, y, z = (np.broadcast_to(c, coord_shape) for c in xyz)
-
- def _broadcast_color_arg(color, name):
- if np.ndim(color) in (0, 1):
- # single color, like "red" or [1, 0, 0]
- return np.broadcast_to(color, filled.shape + np.shape(color))
- elif np.ndim(color) in (3, 4):
- # 3D array of strings, or 4D array with last axis rgb
- if np.shape(color)[:3] != filled.shape:
- raise ValueError(
- f"When multidimensional, {name} must match the shape "
- "of filled")
- return color
- else:
- raise ValueError(f"Invalid {name} argument")
-
- # broadcast and default on facecolors
- if facecolors is None:
- facecolors = self._get_patches_for_fill.get_next_color()
- facecolors = _broadcast_color_arg(facecolors, 'facecolors')
-
- # broadcast but no default on edgecolors
- edgecolors = _broadcast_color_arg(edgecolors, 'edgecolors')
-
- # scale to the full array, even if the data is only in the center
- self.auto_scale_xyz(x, y, z)
-
- # points lying on corners of a square
- square = np.array([
- [0, 0, 0],
- [1, 0, 0],
- [1, 1, 0],
- [0, 1, 0],
- ], dtype=np.intp)
-
- voxel_faces = defaultdict(list)
-
- def permutation_matrices(n):
- """Generate cyclic permutation matrices."""
- mat = np.eye(n, dtype=np.intp)
- for i in range(n):
- yield mat
- mat = np.roll(mat, 1, axis=0)
-
- # iterate over each of the YZ, ZX, and XY orientations, finding faces
- # to render
- for permute in permutation_matrices(3):
- # find the set of ranges to iterate over
- pc, qc, rc = permute.T.dot(size)
- pinds = np.arange(pc)
- qinds = np.arange(qc)
- rinds = np.arange(rc)
-
- square_rot_pos = square.dot(permute.T)
- square_rot_neg = square_rot_pos[::-1]
-
- # iterate within the current plane
- for p in pinds:
- for q in qinds:
- # iterate perpendicularly to the current plane, handling
- # boundaries. We only draw faces between a voxel and an
- # empty space, to avoid drawing internal faces.
-
- # draw lower faces
- p0 = permute.dot([p, q, 0])
- i0 = tuple(p0)
- if filled[i0]:
- voxel_faces[i0].append(p0 + square_rot_neg)
-
- # draw middle faces
- for r1, r2 in zip(rinds[:-1], rinds[1:]):
- p1 = permute.dot([p, q, r1])
- p2 = permute.dot([p, q, r2])
-
- i1 = tuple(p1)
- i2 = tuple(p2)
-
- if filled[i1] and not filled[i2]:
- voxel_faces[i1].append(p2 + square_rot_pos)
- elif not filled[i1] and filled[i2]:
- voxel_faces[i2].append(p2 + square_rot_neg)
-
- # draw upper faces
- pk = permute.dot([p, q, rc-1])
- pk2 = permute.dot([p, q, rc])
- ik = tuple(pk)
- if filled[ik]:
- voxel_faces[ik].append(pk2 + square_rot_pos)
-
- # iterate over the faces, and generate a Poly3DCollection for each
- # voxel
- polygons = {}
- for coord, faces_inds in voxel_faces.items():
- # convert indices into 3D positions
- if xyz is None:
- faces = faces_inds
- else:
- faces = []
- for face_inds in faces_inds:
- ind = face_inds[:, 0], face_inds[:, 1], face_inds[:, 2]
- face = np.empty(face_inds.shape)
- face[:, 0] = x[ind]
- face[:, 1] = y[ind]
- face[:, 2] = z[ind]
- faces.append(face)
-
- # shade the faces
- facecolor = facecolors[coord]
- edgecolor = edgecolors[coord]
-
- poly = art3d.Poly3DCollection(
- faces, facecolors=facecolor, edgecolors=edgecolor,
- shade=shade, lightsource=lightsource, **kwargs)
- self.add_collection3d(poly)
- polygons[coord] = poly
-
- return polygons
-
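A sketch of the voxel API (the boolean volume is invented):

import numpy as np
import matplotlib.pyplot as plt

filled = np.zeros((4, 4, 4), dtype=bool)
filled[:2, :2, :2] = True   # a 2x2x2 block in one corner
filled[2:, 2:, 2:] = True   # and another in the opposite corner

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Returns a dict mapping (i, j, k) -> Poly3DCollection for drawn voxels.
faces = ax.voxels(filled, facecolors='C0', edgecolors='k')
plt.show()
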
- @_preprocess_data(replace_names=["x", "y", "z", "xerr", "yerr", "zerr"])
- def errorbar(self, x, y, z, zerr=None, yerr=None, xerr=None, fmt='',
- barsabove=False, errorevery=1, ecolor=None, elinewidth=None,
- capsize=None, capthick=None, xlolims=False, xuplims=False,
- ylolims=False, yuplims=False, zlolims=False, zuplims=False,
- **kwargs):
- """
- Plot lines and/or markers with errorbars around them.
-
- *x*/*y*/*z* define the data locations, and *xerr*/*yerr*/*zerr* define
- the errorbar sizes. By default, this draws the data markers/lines as
-        well as the errorbars. Use fmt='none' to draw errorbars only.
-
- Parameters
- ----------
- x, y, z : float or array-like
- The data positions.
-
- xerr, yerr, zerr : float or array-like, shape (N,) or (2, N), optional
- The errorbar sizes:
-
- - scalar: Symmetric +/- values for all data points.
-            - shape(N,): Symmetric +/- values for each data point.
- - shape(2, N): Separate - and + values for each bar. First row
- contains the lower errors, the second row contains the upper
- errors.
- - *None*: No errorbar.
-
- Note that all error arrays should have *positive* values.
-
- fmt : str, default: ''
- The format for the data points / data lines. See `.plot` for
- details.
-
- Use 'none' (case-insensitive) to plot errorbars without any data
- markers.
-
- ecolor : color, default: None
- The color of the errorbar lines. If None, use the color of the
- line connecting the markers.
-
- elinewidth : float, default: None
- The linewidth of the errorbar lines. If None, the linewidth of
- the current style is used.
-
- capsize : float, default: :rc:`errorbar.capsize`
- The length of the error bar caps in points.
-
- capthick : float, default: None
- An alias to the keyword argument *markeredgewidth* (a.k.a. *mew*).
- This setting is a more sensible name for the property that
- controls the thickness of the error bar cap in points. For
-            backwards compatibility, if *mew* or *markeredgewidth* is given,
-            then it will override *capthick*. This may change in future
- releases.
-
- barsabove : bool, default: False
- If True, will plot the errorbars above the plot
- symbols. Default is below.
-
- xlolims, ylolims, zlolims : bool, default: False
- These arguments can be used to indicate that a value gives only
- lower limits. In that case a caret symbol is used to indicate
- this. *lims*-arguments may be scalars, or array-likes of the same
- length as the errors. To use limits with inverted axes,
- `~.Axes.set_xlim` or `~.Axes.set_ylim` must be called before
- `errorbar`. Note the tricky parameter names: setting e.g.
- *ylolims* to True means that the y-value is a *lower* limit of the
-            true value, so only an *upward*-pointing arrow will be drawn!
-
- xuplims, yuplims, zuplims : bool, default: False
- Same as above, but for controlling the upper limits.
-
- errorevery : int or (int, int), default: 1
-            Draws error bars on a subset of the data. *errorevery* =N draws
- error bars on the points (x[::N], y[::N], z[::N]).
- *errorevery* =(start, N) draws error bars on the points
- (x[start::N], y[start::N], z[start::N]). e.g. *errorevery* =(6, 3)
- adds error bars to the data at (x[6], x[9], x[12], x[15], ...).
- Used to avoid overlapping error bars when two series share x-axis
- values.
-
- Returns
- -------
- errlines : list
- List of `~mpl_toolkits.mplot3d.art3d.Line3DCollection` instances
- each containing an errorbar line.
- caplines : list
- List of `~mpl_toolkits.mplot3d.art3d.Line3D` instances each
- containing a capline object.
- limmarks : list
- List of `~mpl_toolkits.mplot3d.art3d.Line3D` instances each
- containing a marker with an upper or lower limit.
-
- Other Parameters
- ----------------
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
-
- **kwargs
-            All other keyword arguments for styling errorbar lines are passed
-            on to `~mpl_toolkits.mplot3d.art3d.Line3DCollection`.
-
- Examples
- --------
- .. plot:: gallery/mplot3d/errorbar3d.py
- """
- had_data = self.has_data()
-
- kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
- # Drop anything that comes in as None to use the default instead.
- kwargs = {k: v for k, v in kwargs.items() if v is not None}
- kwargs.setdefault('zorder', 2)
-
- self._process_unit_info([("x", x), ("y", y), ("z", z)], kwargs,
- convert=False)
-
- # make sure all the args are iterable; use lists not arrays to
- # preserve units
- x = x if np.iterable(x) else [x]
- y = y if np.iterable(y) else [y]
- z = z if np.iterable(z) else [z]
-
- if not len(x) == len(y) == len(z):
- raise ValueError("'x', 'y', and 'z' must have the same size")
-
- everymask = self._errorevery_to_mask(x, errorevery)
-
- label = kwargs.pop("label", None)
- kwargs['label'] = '_nolegend_'
-
- # Create the main line and determine overall kwargs for child artists.
- # We avoid calling self.plot() directly, or self._get_lines(), because
- # that would call self._process_unit_info again, and do other indirect
- # data processing.
- (data_line, base_style), = self._get_lines._plot_args(
- self, (x, y) if fmt == '' else (x, y, fmt), kwargs, return_kwargs=True)
- art3d.line_2d_to_3d(data_line, zs=z)
-
- # Do this after creating `data_line` to avoid modifying `base_style`.
- if barsabove:
- data_line.set_zorder(kwargs['zorder'] - .1)
- else:
- data_line.set_zorder(kwargs['zorder'] + .1)
-
- # Add line to plot, or throw it away and use it to determine kwargs.
- if fmt.lower() != 'none':
- self.add_line(data_line)
- else:
- data_line = None
- # Remove alpha=0 color that _process_plot_format returns.
- base_style.pop('color')
-
- if 'color' not in base_style:
- base_style['color'] = 'C0'
- if ecolor is None:
- ecolor = base_style['color']
-
- # Eject any line-specific information from format string, as it's not
- # needed for bars or caps.
- for key in ['marker', 'markersize', 'markerfacecolor',
- 'markeredgewidth', 'markeredgecolor', 'markevery',
- 'linestyle', 'fillstyle', 'drawstyle', 'dash_capstyle',
- 'dash_joinstyle', 'solid_capstyle', 'solid_joinstyle']:
- base_style.pop(key, None)
-
- # Make the style dict for the line collections (the bars).
- eb_lines_style = {**base_style, 'color': ecolor}
-
- if elinewidth:
- eb_lines_style['linewidth'] = elinewidth
- elif 'linewidth' in kwargs:
- eb_lines_style['linewidth'] = kwargs['linewidth']
-
- for key in ('transform', 'alpha', 'zorder', 'rasterized'):
- if key in kwargs:
- eb_lines_style[key] = kwargs[key]
-
- # Make the style dict for caps (the "hats").
- eb_cap_style = {**base_style, 'linestyle': 'None'}
- if capsize is None:
- capsize = mpl.rcParams["errorbar.capsize"]
- if capsize > 0:
- eb_cap_style['markersize'] = 2. * capsize
- if capthick is not None:
- eb_cap_style['markeredgewidth'] = capthick
- eb_cap_style['color'] = ecolor
-
- def _apply_mask(arrays, mask):
- # Return, for each array in *arrays*, the elements for which *mask*
- # is True, without using fancy indexing.
- return [[*itertools.compress(array, mask)] for array in arrays]
-
- def _extract_errs(err, data, lomask, himask):
- # For separate +/- error values we need to unpack err
- if len(err.shape) == 2:
- low_err, high_err = err
- else:
- low_err, high_err = err, err
-
- lows = np.where(lomask | ~everymask, data, data - low_err)
- highs = np.where(himask | ~everymask, data, data + high_err)
-
- return lows, highs
-
- # collect drawn items while looping over the three coordinates
- errlines, caplines, limmarks = [], [], []
-
- # list of endpoint coordinates, used for auto-scaling
- coorderrs = []
-
- # define the markers used for errorbar caps and limits below
- # the dictionary key is mapped by the `i_xyz` helper dictionary
- capmarker = {0: '|', 1: '|', 2: '_'}
- i_xyz = {'x': 0, 'y': 1, 'z': 2}
-
- # Calculate marker size from points to quiver length. Because these are
- # not markers, and 3D Axes do not use the normal transform stack, this
- # is a bit involved. Since the quiver arrows will change size as the
- # scene is rotated, they are given a standard size based on viewing
- # them directly in planar form.
- quiversize = eb_cap_style.get('markersize',
- mpl.rcParams['lines.markersize']) ** 2
- quiversize *= self.figure.dpi / 72
- quiversize = self.transAxes.inverted().transform([
- (0, 0), (quiversize, quiversize)])
- quiversize = np.mean(np.diff(quiversize, axis=0))
- # quiversize is now in Axes coordinates, and to convert back to data
- # coordinates, we need to run it through the inverse 3D transform. For
- # consistency, this uses a fixed elevation, azimuth, and roll.
- with cbook._setattr_cm(self, elev=0, azim=0, roll=0):
- invM = np.linalg.inv(self.get_proj())
- # elev=azim=roll=0 produces the Y-Z plane, so quiversize in 2D 'x' is
- # 'y' in 3D, hence the 1 index.
- quiversize = np.dot(invM, [quiversize, 0, 0, 0])[1]
- # Quivers use a fixed 15-degree arrow head, so scale up the length so
- # that the size corresponds to the base. In other words, this constant
- # corresponds to the equation tan(15) = (base / 2) / (arrow length).
- quiversize *= 1.8660254037844388
- eb_quiver_style = {**eb_cap_style,
- 'length': quiversize, 'arrow_length_ratio': 1}
- eb_quiver_style.pop('markersize', None)
-
- # loop over x-, y-, and z-direction and draw relevant elements
- for zdir, data, err, lolims, uplims in zip(
- ['x', 'y', 'z'], [x, y, z], [xerr, yerr, zerr],
- [xlolims, ylolims, zlolims], [xuplims, yuplims, zuplims]):
-
- dir_vector = art3d.get_dir_vector(zdir)
- i_zdir = i_xyz[zdir]
-
- if err is None:
- continue
-
- if not np.iterable(err):
- err = [err] * len(data)
-
- err = np.atleast_1d(err)
-
- # arrays fine here, they are booleans and hence not units
- lolims = np.broadcast_to(lolims, len(data)).astype(bool)
- uplims = np.broadcast_to(uplims, len(data)).astype(bool)
-
- # a nested list structure that expands to (xl,xh),(yl,yh),(zl,zh),
- # where x/y/z and l/h correspond to dimensions and low/high
- # positions of errorbars in a dimension we're looping over
- coorderr = [
- _extract_errs(err * dir_vector[i], coord, lolims, uplims)
- for i, coord in enumerate([x, y, z])]
- (xl, xh), (yl, yh), (zl, zh) = coorderr
-
- # draws capmarkers - flat caps orthogonal to the error bars
- nolims = ~(lolims | uplims)
- if nolims.any() and capsize > 0:
- lo_caps_xyz = _apply_mask([xl, yl, zl], nolims & everymask)
- hi_caps_xyz = _apply_mask([xh, yh, zh], nolims & everymask)
-
- # setting '_' for z-caps and '|' for x- and y-caps;
- # these markers will rotate as the viewing angle changes
- cap_lo = art3d.Line3D(*lo_caps_xyz, ls='',
- marker=capmarker[i_zdir],
- **eb_cap_style)
- cap_hi = art3d.Line3D(*hi_caps_xyz, ls='',
- marker=capmarker[i_zdir],
- **eb_cap_style)
- self.add_line(cap_lo)
- self.add_line(cap_hi)
- caplines.append(cap_lo)
- caplines.append(cap_hi)
-
- if lolims.any():
- xh0, yh0, zh0 = _apply_mask([xh, yh, zh], lolims & everymask)
- self.quiver(xh0, yh0, zh0, *dir_vector, **eb_quiver_style)
- if uplims.any():
- xl0, yl0, zl0 = _apply_mask([xl, yl, zl], uplims & everymask)
- self.quiver(xl0, yl0, zl0, *-dir_vector, **eb_quiver_style)
-
- errline = art3d.Line3DCollection(np.array(coorderr).T,
- **eb_lines_style)
- self.add_collection(errline)
- errlines.append(errline)
- coorderrs.append(coorderr)
-
- coorderrs = np.array(coorderrs)
-
- def _digout_minmax(err_arr, coord_label):
- return (np.nanmin(err_arr[:, i_xyz[coord_label], :, :]),
- np.nanmax(err_arr[:, i_xyz[coord_label], :, :]))
-
- minx, maxx = _digout_minmax(coorderrs, 'x')
- miny, maxy = _digout_minmax(coorderrs, 'y')
- minz, maxz = _digout_minmax(coorderrs, 'z')
- self.auto_scale_xyz((minx, maxx), (miny, maxy), (minz, maxz), had_data)
-
- # Adapting errorbar containers for 3d case, assuming z-axis points "up"
- errorbar_container = mcontainer.ErrorbarContainer(
- (data_line, tuple(caplines), tuple(errlines)),
- has_xerr=(xerr is not None or yerr is not None),
- has_yerr=(zerr is not None),
- label=label)
- self.containers.append(errorbar_container)
-
- return errlines, caplines, limmarks
-
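A sketch of the 3D errorbar call (curve and error sizes invented):

import numpy as np
import matplotlib.pyplot as plt

t = np.arange(0, 2 * np.pi + 0.1, 0.15)
x, y, z = np.sin(t), np.cos(3 * t), np.sin(5 * t)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Symmetric z-errors of 0.2, drawn on every 5th point only.
ax.errorbar(x, y, z, zerr=0.2, errorevery=5, fmt='o-', capsize=3)
plt.show()
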
- @_api.make_keyword_only("3.8", "call_axes_locator")
- def get_tightbbox(self, renderer=None, call_axes_locator=True,
- bbox_extra_artists=None, *, for_layout_only=False):
- ret = super().get_tightbbox(renderer,
- call_axes_locator=call_axes_locator,
- bbox_extra_artists=bbox_extra_artists,
- for_layout_only=for_layout_only)
- batch = [ret]
- if self._axis3don:
- for axis in self._axis_map.values():
- if axis.get_visible():
- axis_bb = martist._get_tightbbox_for_layout_only(
- axis, renderer)
- if axis_bb:
- batch.append(axis_bb)
- return mtransforms.Bbox.union(batch)
-
- @_preprocess_data()
- def stem(self, x, y, z, *, linefmt='C0-', markerfmt='C0o', basefmt='C3-',
- bottom=0, label=None, orientation='z'):
- """
- Create a 3D stem plot.
-
- A stem plot draws lines perpendicular to a baseline, and places markers
- at the heads. By default, the baseline is defined by *x* and *y*, and
- stems are drawn vertically from *bottom* to *z*.
-
- Parameters
- ----------
- x, y, z : array-like
- The positions of the heads of the stems. The stems are drawn along
- the *orientation*-direction from the baseline at *bottom* (in the
- *orientation*-coordinate) to the heads. By default, the *x* and *y*
- positions are used for the baseline and *z* for the head position,
- but this can be changed by *orientation*.
-
- linefmt : str, default: 'C0-'
- A string defining the properties of the vertical lines. Usually,
- this will be a color or a color and a linestyle:
-
- ========= =============
- Character Line Style
- ========= =============
- ``'-'`` solid line
- ``'--'`` dashed line
- ``'-.'`` dash-dot line
- ``':'`` dotted line
- ========= =============
-
- Note: While it is technically possible to specify valid formats
- other than color or color and linestyle (e.g. 'rx' or '-.'), this
- is beyond the intention of the method and will most likely not
- result in a reasonable plot.
-
- markerfmt : str, default: 'C0o'
- A string defining the properties of the markers at the stem heads.
-
- basefmt : str, default: 'C3-'
- A format string defining the properties of the baseline.
-
- bottom : float, default: 0
- The position of the baseline, in *orientation*-coordinates.
-
- label : str, default: None
- The label to use for the stems in legends.
-
- orientation : {'x', 'y', 'z'}, default: 'z'
- The direction along which stems are drawn.
-
- data : indexable object, optional
- DATA_PARAMETER_PLACEHOLDER
-
- Returns
- -------
- `.StemContainer`
- The container may be treated like a tuple
- (*markerline*, *stemlines*, *baseline*)
-
- Examples
- --------
- .. plot:: gallery/mplot3d/stem3d_demo.py
- """
-
- from matplotlib.container import StemContainer
-
- had_data = self.has_data()
-
- _api.check_in_list(['x', 'y', 'z'], orientation=orientation)
-
- xlim = (np.min(x), np.max(x))
- ylim = (np.min(y), np.max(y))
- zlim = (np.min(z), np.max(z))
-
- # Determine the appropriate plane for the baseline and the direction of
- # stemlines based on the value of orientation.
- if orientation == 'x':
- basex, basexlim = y, ylim
- basey, baseylim = z, zlim
- lines = [[(bottom, thisy, thisz), (thisx, thisy, thisz)]
- for thisx, thisy, thisz in zip(x, y, z)]
- elif orientation == 'y':
- basex, basexlim = x, xlim
- basey, baseylim = z, zlim
- lines = [[(thisx, bottom, thisz), (thisx, thisy, thisz)]
- for thisx, thisy, thisz in zip(x, y, z)]
- else:
- basex, basexlim = x, xlim
- basey, baseylim = y, ylim
- lines = [[(thisx, thisy, bottom), (thisx, thisy, thisz)]
- for thisx, thisy, thisz in zip(x, y, z)]
-
- # Determine style for stem lines.
- linestyle, linemarker, linecolor = _process_plot_format(linefmt)
- if linestyle is None:
- linestyle = mpl.rcParams['lines.linestyle']
-
- # Plot everything in required order.
- baseline, = self.plot(basex, basey, basefmt, zs=bottom,
- zdir=orientation, label='_nolegend_')
- stemlines = art3d.Line3DCollection(
- lines, linestyles=linestyle, colors=linecolor, label='_nolegend_')
- self.add_collection(stemlines)
- markerline, = self.plot(x, y, z, markerfmt, label='_nolegend_')
-
- stem_container = StemContainer((markerline, stemlines, baseline),
- label=label)
- self.add_container(stem_container)
-
- jx, jy, jz = art3d.juggle_axes(basexlim, baseylim, [bottom, bottom],
- orientation)
- self.auto_scale_xyz([*jx, *xlim], [*jy, *ylim], [*jz, *zlim], had_data)
-
- return stem_container
-
- stem3D = stem
-
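A sketch of the 3D stem call (the heads are taken from a helix for illustration):

import numpy as np
import matplotlib.pyplot as plt

theta = np.linspace(0, 2 * np.pi, 40)
x, y, z = np.cos(theta), np.sin(theta), theta / (2 * np.pi)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Stems rise along z from the baseline at bottom=0 up to the heads.
ax.stem(x, y, z, bottom=0)
plt.show()
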
-
-def get_test_data(delta=0.05):
- """Return a tuple X, Y, Z with a test data set."""
- x = y = np.arange(-3.0, 3.0, delta)
- X, Y = np.meshgrid(x, y)
-
- Z1 = np.exp(-(X**2 + Y**2) / 2) / (2 * np.pi)
- Z2 = (np.exp(-(((X - 1) / 1.5)**2 + ((Y - 1) / 0.5)**2) / 2) /
- (2 * np.pi * 0.5 * 1.5))
- Z = Z2 - Z1
-
- X = X * 10
- Y = Y * 10
- Z = Z * 500
- return X, Y, Z
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/_typing/_nbit.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/_typing/_nbit.py
deleted file mode 100644
index b8d35db4f5947fc1fc7f4672c3510f4a4264da6f..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/numpy/_typing/_nbit.py
+++ /dev/null
@@ -1,16 +0,0 @@
-"""A module with the precisions of platform-specific `~numpy.number`s."""
-
-from typing import Any
-
-# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin
-_NBitByte = Any
-_NBitShort = Any
-_NBitIntC = Any
-_NBitIntP = Any
-_NBitInt = Any
-_NBitLongLong = Any
-
-_NBitHalf = Any
-_NBitSingle = Any
-_NBitDouble = Any
-_NBitLongDouble = Any
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/util/test_validate_args.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/util/test_validate_args.py
deleted file mode 100644
index eef0931ec28efd02e3db7a85b0b3260742c1ff2d..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/util/test_validate_args.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import pytest
-
-from pandas.util._validators import validate_args
-
-
-@pytest.fixture
-def _fname():
- return "func"
-
-
-def test_bad_min_fname_arg_count(_fname):
- msg = "'max_fname_arg_count' must be non-negative"
-
- with pytest.raises(ValueError, match=msg):
- validate_args(_fname, (None,), -1, "foo")
-
-
-def test_bad_arg_length_max_value_single(_fname):
- args = (None, None)
- compat_args = ("foo",)
-
- min_fname_arg_count = 0
- max_length = len(compat_args) + min_fname_arg_count
- actual_length = len(args) + min_fname_arg_count
- msg = (
- rf"{_fname}\(\) takes at most {max_length} "
- rf"argument \({actual_length} given\)"
- )
-
- with pytest.raises(TypeError, match=msg):
- validate_args(_fname, args, min_fname_arg_count, compat_args)
-
-
-def test_bad_arg_length_max_value_multiple(_fname):
- args = (None, None)
- compat_args = {"foo": None}
-
- min_fname_arg_count = 2
- max_length = len(compat_args) + min_fname_arg_count
- actual_length = len(args) + min_fname_arg_count
- msg = (
- rf"{_fname}\(\) takes at most {max_length} "
- rf"arguments \({actual_length} given\)"
- )
-
- with pytest.raises(TypeError, match=msg):
- validate_args(_fname, args, min_fname_arg_count, compat_args)
-
-
-@pytest.mark.parametrize("i", range(1, 3))
-def test_not_all_defaults(i, _fname):
- bad_arg = "foo"
- msg = (
- f"the '{bad_arg}' parameter is not supported "
- rf"in the pandas implementation of {_fname}\(\)"
- )
-
- compat_args = {"foo": 2, "bar": -1, "baz": 3}
- arg_vals = (1, -1, 3)
-
- with pytest.raises(ValueError, match=msg):
- validate_args(_fname, arg_vals[:i], 2, compat_args)
-
-
-def test_validation(_fname):
- # No exceptions should be raised.
- validate_args(_fname, (None,), 2, {"out": None})
-
- compat_args = {"axis": 1, "out": None}
- validate_args(_fname, (1, None), 2, compat_args)
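As a brief sketch of the behaviour these tests pin down (the function name "func" is arbitrary): validate_args accepts positional arguments only when their values equal the defaults recorded in compat_args.

import pytest
from pandas.util._validators import validate_args

compat_args = {"axis": 1, "out": None}
# Matching the recorded defaults passes silently.
validate_args("func", (1, None), 2, compat_args)
# A non-default value for a compatibility-only argument raises.
with pytest.raises(ValueError):
    validate_args("func", (0, None), 2, compat_args)
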
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/util/_doctools.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/util/_doctools.py
deleted file mode 100644
index 12619abf4baaf336dfd3d5ae78a9bc2133f310c0..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/util/_doctools.py
+++ /dev/null
@@ -1,202 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-import numpy as np
-
-import pandas as pd
-
-if TYPE_CHECKING:
- from collections.abc import Iterable
-
-
-class TablePlotter:
- """
-    Lay out some DataFrames in a vertical/horizontal layout for explanation.
- Used in merging.rst
- """
-
- def __init__(
- self,
- cell_width: float = 0.37,
- cell_height: float = 0.25,
- font_size: float = 7.5,
- ) -> None:
- self.cell_width = cell_width
- self.cell_height = cell_height
- self.font_size = font_size
-
- def _shape(self, df: pd.DataFrame) -> tuple[int, int]:
- """
- Calculate table shape considering index levels.
- """
- row, col = df.shape
- return row + df.columns.nlevels, col + df.index.nlevels
-
- def _get_cells(self, left, right, vertical) -> tuple[int, int]:
- """
- Calculate appropriate figure size based on left and right data.
- """
- if vertical:
- # calculate required number of cells
- vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0])
- hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1]
- else:
- vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]])
- hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]])
- return hcells, vcells
-
- def plot(self, left, right, labels: Iterable[str] = (), vertical: bool = True):
- """
- Plot left / right DataFrames in specified layout.
-
- Parameters
- ----------
- left : list of DataFrames before operation is applied
- right : DataFrame of operation result
- labels : list of str to be drawn as titles of left DataFrames
- vertical : bool, default True
- If True, use vertical layout. If False, use horizontal layout.
- """
- from matplotlib import gridspec
- import matplotlib.pyplot as plt
-
- if not isinstance(left, list):
- left = [left]
- left = [self._conv(df) for df in left]
- right = self._conv(right)
-
- hcells, vcells = self._get_cells(left, right, vertical)
-
- if vertical:
- figsize = self.cell_width * hcells, self.cell_height * vcells
- else:
- # include margin for titles
- figsize = self.cell_width * hcells, self.cell_height * vcells
- fig = plt.figure(figsize=figsize)
-
- if vertical:
- gs = gridspec.GridSpec(len(left), hcells)
- # left
- max_left_cols = max(self._shape(df)[1] for df in left)
- max_left_rows = max(self._shape(df)[0] for df in left)
- for i, (_left, _label) in enumerate(zip(left, labels)):
- ax = fig.add_subplot(gs[i, 0:max_left_cols])
- self._make_table(ax, _left, title=_label, height=1.0 / max_left_rows)
- # right
- ax = plt.subplot(gs[:, max_left_cols:])
- self._make_table(ax, right, title="Result", height=1.05 / vcells)
- fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95)
- else:
- max_rows = max(self._shape(df)[0] for df in left + [right])
- height = 1.0 / np.max(max_rows)
- gs = gridspec.GridSpec(1, hcells)
- # left
- i = 0
- for df, _label in zip(left, labels):
- sp = self._shape(df)
- ax = fig.add_subplot(gs[0, i : i + sp[1]])
- self._make_table(ax, df, title=_label, height=height)
- i += sp[1]
- # right
- ax = plt.subplot(gs[0, i:])
- self._make_table(ax, right, title="Result", height=height)
- fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95)
-
- return fig
-
- def _conv(self, data):
- """
-        Convert each input to a form appropriate for table output.
- """
- if isinstance(data, pd.Series):
- if data.name is None:
- data = data.to_frame(name="")
- else:
- data = data.to_frame()
- data = data.fillna("NaN")
- return data
-
- def _insert_index(self, data):
- # insert is destructive
- data = data.copy()
- idx_nlevels = data.index.nlevels
- if idx_nlevels == 1:
- data.insert(0, "Index", data.index)
- else:
- for i in range(idx_nlevels):
- data.insert(i, f"Index{i}", data.index._get_level_values(i))
-
- col_nlevels = data.columns.nlevels
- if col_nlevels > 1:
- col = data.columns._get_level_values(0)
- values = [
- data.columns._get_level_values(i)._values for i in range(1, col_nlevels)
- ]
- col_df = pd.DataFrame(values)
- data.columns = col_df.columns
- data = pd.concat([col_df, data])
- data.columns = col
- return data
-
- def _make_table(self, ax, df, title: str, height: float | None = None) -> None:
- if df is None:
- ax.set_visible(False)
- return
-
- from pandas import plotting
-
- idx_nlevels = df.index.nlevels
- col_nlevels = df.columns.nlevels
-        # must be converted here to get index levels for colorization
- df = self._insert_index(df)
- tb = plotting.table(ax, df, loc=9)
- tb.set_fontsize(self.font_size)
-
- if height is None:
- height = 1.0 / (len(df) + 1)
-
- props = tb.properties()
- for (r, c), cell in props["celld"].items():
- if c == -1:
- cell.set_visible(False)
- elif r < col_nlevels and c < idx_nlevels:
- cell.set_visible(False)
- elif r < col_nlevels or c < idx_nlevels:
- cell.set_facecolor("#AAAAAA")
- cell.set_height(height)
-
- ax.set_title(title, size=self.font_size)
- ax.axis("off")
-
-
-def main() -> None:
- import matplotlib.pyplot as plt
-
- p = TablePlotter()
-
- df1 = pd.DataFrame({"A": [10, 11, 12], "B": [20, 21, 22], "C": [30, 31, 32]})
- df2 = pd.DataFrame({"A": [10, 12], "C": [30, 32]})
-
- p.plot([df1, df2], pd.concat([df1, df2]), labels=["df1", "df2"], vertical=True)
- plt.show()
-
- df3 = pd.DataFrame({"X": [10, 12], "Z": [30, 32]})
-
- p.plot(
- [df1, df3], pd.concat([df1, df3], axis=1), labels=["df1", "df2"], vertical=False
- )
- plt.show()
-
- idx = pd.MultiIndex.from_tuples(
- [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")]
- )
- column = pd.MultiIndex.from_tuples([(1, "A"), (1, "B")])
- df3 = pd.DataFrame({"v1": [1, 2, 3, 4, 5, 6], "v2": [5, 6, 7, 8, 9, 10]}, index=idx)
- df3.columns = column
- p.plot(df3, df3, labels=["df3"])
- plt.show()
-
-
-if __name__ == "__main__":
- main()
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/go.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/go.py
deleted file mode 100644
index fe4a1846d6ce687e567a71074a00224023c3c229..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/go.py
+++ /dev/null
@@ -1,98 +0,0 @@
-"""
- pygments.lexers.go
- ~~~~~~~~~~~~~~~~~~
-
- Lexers for the Google Go language.
-
- :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from pygments.lexer import RegexLexer, bygroups, words
-from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Whitespace
-
-__all__ = ['GoLexer']
-
-
-class GoLexer(RegexLexer):
- """
- For Go source.
-
- .. versionadded:: 1.2
- """
- name = 'Go'
- url = 'https://go.dev/'
- filenames = ['*.go']
- aliases = ['go', 'golang']
- mimetypes = ['text/x-gosrc']
-
- tokens = {
- 'root': [
- (r'\n', Whitespace),
- (r'\s+', Whitespace),
- (r'(\\)(\n)', bygroups(Text, Whitespace)), # line continuations
- (r'//(.*?)$', Comment.Single),
- (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
- (r'(import|package)\b', Keyword.Namespace),
- (r'(var|func|struct|map|chan|type|interface|const)\b',
- Keyword.Declaration),
- (words((
- 'break', 'default', 'select', 'case', 'defer', 'go',
- 'else', 'goto', 'switch', 'fallthrough', 'if', 'range',
- 'continue', 'for', 'return'), suffix=r'\b'),
- Keyword),
- (r'(true|false|iota|nil)\b', Keyword.Constant),
- # It seems the builtin types aren't actually keywords, but
- # can be used as functions. So we need two declarations.
- (words((
- 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
- 'int', 'int8', 'int16', 'int32', 'int64',
- 'float', 'float32', 'float64',
- 'complex64', 'complex128', 'byte', 'rune',
- 'string', 'bool', 'error', 'uintptr', 'any', 'comparable',
- 'print', 'println', 'panic', 'recover', 'close', 'complex',
- 'real', 'imag', 'len', 'cap', 'append', 'copy', 'delete',
- 'new', 'make', 'min', 'max', 'clear'), suffix=r'\b(\()'),
- bygroups(Name.Builtin, Punctuation)),
- (words((
- 'uint', 'uint8', 'uint16', 'uint32', 'uint64',
- 'int', 'int8', 'int16', 'int32', 'int64',
- 'float', 'float32', 'float64',
- 'complex64', 'complex128', 'byte', 'rune',
- 'string', 'bool', 'error', 'uintptr', 'any', 'comparable'), suffix=r'\b'),
- Keyword.Type),
- # imaginary_lit
- (r'\d+i', Number),
- (r'\d+\.\d*([Ee][-+]\d+)?i', Number),
- (r'\.\d+([Ee][-+]\d+)?i', Number),
- (r'\d+[Ee][-+]\d+i', Number),
- # float_lit
- (r'\d+(\.\d+[eE][+\-]?\d+|'
- r'\.\d*|[eE][+\-]?\d+)', Number.Float),
- (r'\.\d+([eE][+\-]?\d+)?', Number.Float),
- # int_lit
- # -- octal_lit
- (r'0[0-7]+', Number.Oct),
- # -- hex_lit
- (r'0[xX][0-9a-fA-F]+', Number.Hex),
- # -- decimal_lit
- (r'(0|[1-9][0-9]*)', Number.Integer),
- # char_lit
- (r"""'(\\['"\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-7]{1,3}"""
- r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|[^\\])'""",
- String.Char),
- # StringLiteral
- # -- raw_string_lit
- (r'`[^`]*`', String),
- # -- interpreted_string_lit
- (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
- # Tokens
- (r'(<<=|>>=|<<|>>|<=|>=|&\^=|&\^|\+=|-=|\*=|/=|%=|&=|\|=|&&|\|\|'
- r'|<-|\+\+|--|==|!=|:=|\.\.\.|[+\-*/%&]'
- r'|~|\|)', Operator),
- (r'[|^<>=!()\[\]{}.,;:]', Punctuation),
- # identifier
- (r'[^\W\d]\w*', Name.Other),
- ]
- }
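A sketch of driving this lexer through the standard pygments highlight API (the Go snippet is illustrative):

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers.go import GoLexer

code = 'package main\n\nfunc main() {\n\tprintln("hello")\n}\n'
# Emits HTML spans using the token classes defined in the rules above.
print(highlight(code, GoLexer(), HtmlFormatter()))
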
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/theorem.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/theorem.py
deleted file mode 100644
index 529451adc63fab673eef07b2ee865c0711ea1285..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pygments/lexers/theorem.py
+++ /dev/null
@@ -1,493 +0,0 @@
-"""
- pygments.lexers.theorem
- ~~~~~~~~~~~~~~~~~~~~~~~
-
- Lexers for theorem-proving languages.
-
- :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import re
-
-from pygments.lexer import RegexLexer, default, words, include
-from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
- Number, Punctuation, Generic, Whitespace
-
-__all__ = ['CoqLexer', 'IsabelleLexer', 'LeanLexer']
-
-
-class CoqLexer(RegexLexer):
- """
- For the Coq theorem prover.
-
- .. versionadded:: 1.5
- """
-
- name = 'Coq'
- url = 'http://coq.inria.fr/'
- aliases = ['coq']
- filenames = ['*.v']
- mimetypes = ['text/x-coq']
-
- flags = 0 # no re.MULTILINE
-
- keywords1 = (
- # Vernacular commands
- 'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable',
- 'Variables', 'Parameter', 'Parameters', 'Axiom', 'Axioms', 'Hypothesis',
- 'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope',
- 'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Example', 'Let',
- 'Ltac', 'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit',
- 'Arguments', 'Types', 'Unset', 'Contextual', 'Strict', 'Prenex',
- 'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure',
- 'Variant', 'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Fact',
- 'Remark', 'Corollary', 'Proposition', 'Property', 'Goal',
- 'Proof', 'Restart', 'Save', 'Qed', 'Defined', 'Abort', 'Admitted',
- 'Hint', 'Resolve', 'Rewrite', 'View', 'Search', 'Compute', 'Eval',
- 'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside',
- 'outside', 'Check', 'Global', 'Instance', 'Class', 'Existing',
- 'Universe', 'Polymorphic', 'Monomorphic', 'Context', 'Scheme', 'From',
- 'Undo', 'Fail', 'Function',
- )
- keywords2 = (
- # Gallina
- 'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct',
- 'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else',
- 'for', 'of', 'nosimpl', 'with', 'as',
- )
- keywords3 = (
- # Sorts
- 'Type', 'Prop', 'SProp', 'Set',
- )
- keywords4 = (
- # Tactics
- 'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro',
- 'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct',
- 'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite',
- 'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold',
- 'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog',
- 'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial',
- 'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto',
- 'split', 'left', 'right', 'autorewrite', 'tauto', 'setoid_rewrite',
- 'intuition', 'eauto', 'eapply', 'econstructor', 'etransitivity',
- 'constructor', 'erewrite', 'red', 'cbv', 'lazy', 'vm_compute',
- 'native_compute', 'subst',
- )
- keywords5 = (
- # Terminators
- 'by', 'now', 'done', 'exact', 'reflexivity',
- 'tauto', 'romega', 'omega', 'lia', 'nia', 'lra', 'nra', 'psatz',
- 'assumption', 'solve', 'contradiction', 'discriminate',
- 'congruence', 'admit'
- )
- keywords6 = (
- # Control
- 'do', 'last', 'first', 'try', 'idtac', 'repeat',
- )
- # 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
- # 'downto', 'else', 'end', 'exception', 'external', 'false',
- # 'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
- # 'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
- # 'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
- # 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
- # 'type', 'val', 'virtual', 'when', 'while', 'with'
- keyopts = (
- '!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-', r'-\.',
- '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<', '<-',
- '<->', '=', '>', '>]', r'>\}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
- r'\[\|', ']', '_', '`', r'\{', r'\{<', r'\|', r'\|]', r'\}', '~', '=>',
- r'/\\', r'\\/', r'\{\|', r'\|\}',
- # 'Π', 'Σ', # Not defined in the standard library
- 'λ', '¬', '∧', '∨', '∀', '∃', '→', '↔', '≠', '≤', '≥',
- )
- operators = r'[!$%&*+\./:<=>?@^|~-]'
- prefix_syms = r'[!?~]'
- infix_syms = r'[=<>@^|&+\*/$%-]'
-
- tokens = {
- 'root': [
- (r'\s+', Text),
- (r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
- (r'\(\*', Comment, 'comment'),
- (r'\b(?:[^\W\d][\w\']*\.)+[^\W\d][\w\']*\b', Name),
- (r'\bEquations\b\??', Keyword.Namespace),
- # Very weak heuristic to distinguish the Set vernacular from the Set sort
- (r'\bSet(?=[ \t]+[A-Z][a-z][^\n]*?\.)', Keyword.Namespace),
- (words(keywords1, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
- (words(keywords2, prefix=r'\b', suffix=r'\b'), Keyword),
- (words(keywords3, prefix=r'\b', suffix=r'\b'), Keyword.Type),
- (words(keywords4, prefix=r'\b', suffix=r'\b'), Keyword),
- (words(keywords5, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
- (words(keywords6, prefix=r'\b', suffix=r'\b'), Keyword.Reserved),
- # (r'\b([A-Z][\w\']*)(\.)', Name.Namespace, 'dotted'),
- (r'\b([A-Z][\w\']*)', Name),
- (r'(%s)' % '|'.join(keyopts[::-1]), Operator),
- (r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
-
- (r"[^\W\d][\w']*", Name),
-
- (r'\d[\d_]*', Number.Integer),
- (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
- (r'0[oO][0-7][0-7_]*', Number.Oct),
- (r'0[bB][01][01_]*', Number.Bin),
- (r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
-
- (r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'", String.Char),
-
- (r"'.'", String.Char),
- (r"'", Keyword), # a stray quote is another syntax element
-
- (r'"', String.Double, 'string'),
-
- (r'[~?][a-z][\w\']*:', Name),
- (r'\S', Name.Builtin.Pseudo),
- ],
- 'comment': [
- (r'[^(*)]+', Comment),
- (r'\(\*', Comment, '#push'),
- (r'\*\)', Comment, '#pop'),
- (r'[(*)]', Comment),
- ],
- 'string': [
- (r'[^"]+', String.Double),
- (r'""', String.Double),
- (r'"', String.Double, '#pop'),
- ],
- 'dotted': [
- (r'\s+', Text),
- (r'\.', Punctuation),
- (r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
- (r'[A-Z][\w\']*', Name.Class, '#pop'),
- (r'[a-z][a-z0-9_\']*', Name, '#pop'),
- default('#pop')
- ],
- }
-
- def analyse_text(text):
- if 'Qed' in text and 'Proof' in text:
- return 1
-
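The analyse_text hook above feeds pygments' lexer guessing; a sketch with an illustrative snippet:

from pygments.lexers import guess_lexer

snippet = "Lemma trivial_lemma : True.\nProof. trivial. Qed.\n"
# 'Proof' and 'Qed' both occur, so analyse_text scores 1 and the
# guesser favours CoqLexer.
print(guess_lexer(snippet))
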
-
-class IsabelleLexer(RegexLexer):
- """
- For the Isabelle proof assistant.
-
- .. versionadded:: 2.0
- """
-
- name = 'Isabelle'
- url = 'https://isabelle.in.tum.de/'
- aliases = ['isabelle']
- filenames = ['*.thy']
- mimetypes = ['text/x-isabelle']
-
- keyword_minor = (
- 'and', 'assumes', 'attach', 'avoids', 'binder', 'checking',
- 'class_instance', 'class_relation', 'code_module', 'congs',
- 'constant', 'constrains', 'datatypes', 'defines', 'file', 'fixes',
- 'for', 'functions', 'hints', 'identifier', 'if', 'imports', 'in',
- 'includes', 'infix', 'infixl', 'infixr', 'is', 'keywords', 'lazy',
- 'module_name', 'monos', 'morphisms', 'no_discs_sels', 'notes',
- 'obtains', 'open', 'output', 'overloaded', 'parametric', 'permissive',
- 'pervasive', 'rep_compat', 'shows', 'structure', 'type_class',
- 'type_constructor', 'unchecked', 'unsafe', 'where',
- )
-
- keyword_diag = (
- 'ML_command', 'ML_val', 'class_deps', 'code_deps', 'code_thms',
- 'display_drafts', 'find_consts', 'find_theorems', 'find_unused_assms',
- 'full_prf', 'help', 'locale_deps', 'nitpick', 'pr', 'prf',
- 'print_abbrevs', 'print_antiquotations', 'print_attributes',
- 'print_binds', 'print_bnfs', 'print_bundles',
- 'print_case_translations', 'print_cases', 'print_claset',
- 'print_classes', 'print_codeproc', 'print_codesetup',
- 'print_coercions', 'print_commands', 'print_context',
- 'print_defn_rules', 'print_dependencies', 'print_facts',
- 'print_induct_rules', 'print_inductives', 'print_interps',
- 'print_locale', 'print_locales', 'print_methods', 'print_options',
- 'print_orders', 'print_quot_maps', 'print_quotconsts',
- 'print_quotients', 'print_quotientsQ3', 'print_quotmapsQ3',
- 'print_rules', 'print_simpset', 'print_state', 'print_statement',
- 'print_syntax', 'print_theorems', 'print_theory', 'print_trans_rules',
- 'prop', 'pwd', 'quickcheck', 'refute', 'sledgehammer', 'smt_status',
- 'solve_direct', 'spark_status', 'term', 'thm', 'thm_deps', 'thy_deps',
- 'try', 'try0', 'typ', 'unused_thms', 'value', 'values', 'welcome',
- 'print_ML_antiquotations', 'print_term_bindings', 'values_prolog',
- )
-
- keyword_thy = ('theory', 'begin', 'end')
-
- keyword_section = ('header', 'chapter')
-
- keyword_subsection = (
- 'section', 'subsection', 'subsubsection', 'sect', 'subsect',
- 'subsubsect',
- )
-
- keyword_theory_decl = (
- 'ML', 'ML_file', 'abbreviation', 'adhoc_overloading', 'arities',
- 'atom_decl', 'attribute_setup', 'axiomatization', 'bundle',
- 'case_of_simps', 'class', 'classes', 'classrel', 'codatatype',
- 'code_abort', 'code_class', 'code_const', 'code_datatype',
- 'code_identifier', 'code_include', 'code_instance', 'code_modulename',
- 'code_monad', 'code_printing', 'code_reflect', 'code_reserved',
- 'code_type', 'coinductive', 'coinductive_set', 'consts', 'context',
- 'datatype', 'datatype_new', 'datatype_new_compat', 'declaration',
- 'declare', 'default_sort', 'defer_recdef', 'definition', 'defs',
- 'domain', 'domain_isomorphism', 'domaindef', 'equivariance',
- 'export_code', 'extract', 'extract_type', 'fixrec', 'fun',
- 'fun_cases', 'hide_class', 'hide_const', 'hide_fact', 'hide_type',
- 'import_const_map', 'import_file', 'import_tptp', 'import_type_map',
- 'inductive', 'inductive_set', 'instantiation', 'judgment', 'lemmas',
- 'lifting_forget', 'lifting_update', 'local_setup', 'locale',
- 'method_setup', 'nitpick_params', 'no_adhoc_overloading',
- 'no_notation', 'no_syntax', 'no_translations', 'no_type_notation',
- 'nominal_datatype', 'nonterminal', 'notation', 'notepad', 'oracle',
- 'overloading', 'parse_ast_translation', 'parse_translation',
- 'partial_function', 'primcorec', 'primrec', 'primrec_new',
- 'print_ast_translation', 'print_translation', 'quickcheck_generator',
- 'quickcheck_params', 'realizability', 'realizers', 'recdef', 'record',
- 'refute_params', 'setup', 'setup_lifting', 'simproc_setup',
- 'simps_of_case', 'sledgehammer_params', 'spark_end', 'spark_open',
- 'spark_open_siv', 'spark_open_vcg', 'spark_proof_functions',
- 'spark_types', 'statespace', 'syntax', 'syntax_declaration', 'text',
- 'text_raw', 'theorems', 'translations', 'type_notation',
- 'type_synonym', 'typed_print_translation', 'typedecl', 'hoarestate',
- 'install_C_file', 'install_C_types', 'wpc_setup', 'c_defs', 'c_types',
- 'memsafe', 'SML_export', 'SML_file', 'SML_import', 'approximate',
- 'bnf_axiomatization', 'cartouche', 'datatype_compat',
- 'free_constructors', 'functor', 'nominal_function',
- 'nominal_termination', 'permanent_interpretation',
- 'binds', 'defining', 'smt2_status', 'term_cartouche',
- 'boogie_file', 'text_cartouche',
- )
-
- keyword_theory_script = ('inductive_cases', 'inductive_simps')
-
- keyword_theory_goal = (
- 'ax_specification', 'bnf', 'code_pred', 'corollary', 'cpodef',
- 'crunch', 'crunch_ignore',
- 'enriched_type', 'function', 'instance', 'interpretation', 'lemma',
- 'lift_definition', 'nominal_inductive', 'nominal_inductive2',
- 'nominal_primrec', 'pcpodef', 'primcorecursive',
- 'quotient_definition', 'quotient_type', 'recdef_tc', 'rep_datatype',
- 'schematic_corollary', 'schematic_lemma', 'schematic_theorem',
- 'spark_vc', 'specification', 'subclass', 'sublocale', 'termination',
- 'theorem', 'typedef', 'wrap_free_constructors',
- )
-
- keyword_qed = ('by', 'done', 'qed')
- keyword_abandon_proof = ('sorry', 'oops')
-
- keyword_proof_goal = ('have', 'hence', 'interpret')
-
- keyword_proof_block = ('next', 'proof')
-
- keyword_proof_chain = (
- 'finally', 'from', 'then', 'ultimately', 'with',
- )
-
- keyword_proof_decl = (
- 'ML_prf', 'also', 'include', 'including', 'let', 'moreover', 'note',
- 'txt', 'txt_raw', 'unfolding', 'using', 'write',
- )
-
- keyword_proof_asm = ('assume', 'case', 'def', 'fix', 'presume')
-
- keyword_proof_asm_goal = ('guess', 'obtain', 'show', 'thus')
-
- keyword_proof_script = (
- 'apply', 'apply_end', 'apply_trace', 'back', 'defer', 'prefer',
- )
-
- operators = (
- '::', ':', '(', ')', '[', ']', '_', '=', ',', '|',
- '+', '-', '!', '?',
- )
-
- proof_operators = ('{', '}', '.', '..')
-
- tokens = {
- 'root': [
- (r'\s+', Whitespace),
- (r'\(\*', Comment, 'comment'),
- (r'\\<open>', String.Symbol, 'cartouche'),
- (r'\{\*|‹', String, 'cartouche'),
-
- (words(operators), Operator),
- (words(proof_operators), Operator.Word),
-
- (words(keyword_minor, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
-
- (words(keyword_diag, prefix=r'\b', suffix=r'\b'), Keyword.Type),
-
- (words(keyword_thy, prefix=r'\b', suffix=r'\b'), Keyword),
- (words(keyword_theory_decl, prefix=r'\b', suffix=r'\b'), Keyword),
-
- (words(keyword_section, prefix=r'\b', suffix=r'\b'), Generic.Heading),
- (words(keyword_subsection, prefix=r'\b', suffix=r'\b'), Generic.Subheading),
-
- (words(keyword_theory_goal, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
- (words(keyword_theory_script, prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
-
- (words(keyword_abandon_proof, prefix=r'\b', suffix=r'\b'), Generic.Error),
-
- (words(keyword_qed, prefix=r'\b', suffix=r'\b'), Keyword),
- (words(keyword_proof_goal, prefix=r'\b', suffix=r'\b'), Keyword),
- (words(keyword_proof_block, prefix=r'\b', suffix=r'\b'), Keyword),
- (words(keyword_proof_decl, prefix=r'\b', suffix=r'\b'), Keyword),
-
- (words(keyword_proof_chain, prefix=r'\b', suffix=r'\b'), Keyword),
- (words(keyword_proof_asm, prefix=r'\b', suffix=r'\b'), Keyword),
- (words(keyword_proof_asm_goal, prefix=r'\b', suffix=r'\b'), Keyword),
-
- (words(keyword_proof_script, prefix=r'\b', suffix=r'\b'), Keyword.Pseudo),
-
- (r'\\<(\w|\^)*>', Text.Symbol),
-
- (r"'[^\W\d][.\w']*", Name.Type),
-
- (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
- (r'0[oO][0-7][0-7_]*', Number.Oct),
- (r'0[bB][01][01_]*', Number.Bin),
-
- (r'"', String, 'string'),
- (r'`', String.Other, 'fact'),
- (r'[^\s:|\[\]\-()=,+!?{}._][^\s:|\[\]\-()=,+!?{}]*', Name),
- ],
- 'comment': [
- (r'[^(*)]+', Comment),
- (r'\(\*', Comment, '#push'),
- (r'\*\)', Comment, '#pop'),
- (r'[(*)]', Comment),
- ],
- 'cartouche': [
- (r'[^{*}\\‹›]+', String),
- (r'\\<open>', String.Symbol, '#push'),
- (r'\{\*|‹', String, '#push'),
- (r'\\<close>', String.Symbol, '#pop'),
- (r'\*\}|›', String, '#pop'),
- (r'\\<(\w|\^)*>', String.Symbol),
- (r'[{*}\\]', String),
- ],
- 'string': [
- (r'[^"\\]+', String),
- (r'\\<(\w|\^)*>', String.Symbol),
- (r'\\"', String),
- (r'\\', String),
- (r'"', String, '#pop'),
- ],
- 'fact': [
- (r'[^`\\]+', String.Other),
- (r'\\<(\w|\^)*>', String.Symbol),
- (r'\\`', String.Other),
- (r'\\', String.Other),
- (r'`', String.Other, '#pop'),
- ],
- }
-
-
-class LeanLexer(RegexLexer):
- """
- For the Lean theorem prover.
-
- .. versionadded:: 2.0
- """
- name = 'Lean'
- url = 'https://github.com/leanprover/lean'
- aliases = ['lean']
- filenames = ['*.lean']
- mimetypes = ['text/x-lean']
-
- tokens = {
- 'expression': [
- (r'\s+', Text),
- (r'/--', String.Doc, 'docstring'),
- (r'/-', Comment, 'comment'),
- (r'--.*?$', Comment.Single),
- (words((
- 'forall', 'fun', 'Pi', 'from', 'have', 'show', 'assume', 'suffices',
- 'let', 'if', 'else', 'then', 'in', 'with', 'calc', 'match',
- 'do'
- ), prefix=r'\b', suffix=r'\b'), Keyword),
- (words(('sorry', 'admit'), prefix=r'\b', suffix=r'\b'), Generic.Error),
- (words(('Sort', 'Prop', 'Type'), prefix=r'\b', suffix=r'\b'), Keyword.Type),
- (words((
- '(', ')', ':', '{', '}', '[', ']', '⟨', '⟩', '‹', '›', '⦃', '⦄', ':=', ',',
- )), Operator),
- (r'[A-Za-z_\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2100-\u214f]'
- r'[.A-Za-z_\'\u03b1-\u03ba\u03bc-\u03fb\u1f00-\u1ffe\u2070-\u2079'
- r'\u207f-\u2089\u2090-\u209c\u2100-\u214f0-9]*', Name),
- (r'0x[A-Za-z0-9]+', Number.Integer),
- (r'0b[01]+', Number.Integer),
- (r'\d+', Number.Integer),
- (r'"', String.Double, 'string'),
- (r"'(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4})|.)'", String.Char),
- (r'[~?][a-z][\w\']*:', Name.Variable),
- (r'\S', Name.Builtin.Pseudo),
- ],
- 'root': [
- (words((
- 'import', 'renaming', 'hiding',
- 'namespace',
- 'local',
- 'private', 'protected', 'section',
- 'include', 'omit', 'section',
- 'protected', 'export',
- 'open',
- 'attribute',
- ), prefix=r'\b', suffix=r'\b'), Keyword.Namespace),
- (words((
- 'lemma', 'theorem', 'def', 'definition', 'example',
- 'axiom', 'axioms', 'constant', 'constants',
- 'universe', 'universes',
- 'inductive', 'coinductive', 'structure', 'extends',
- 'class', 'instance',
- 'abbreviation',
-
- 'noncomputable theory',
-
- 'noncomputable', 'mutual', 'meta',
-
- 'attribute',
-
- 'parameter', 'parameters',
- 'variable', 'variables',
-
- 'reserve', 'precedence',
- 'postfix', 'prefix', 'notation', 'infix', 'infixl', 'infixr',
-
- 'begin', 'by', 'end',
-
- 'set_option',
- 'run_cmd',
- ), prefix=r'\b', suffix=r'\b'), Keyword.Declaration),
- (r'@\[', Keyword.Declaration, 'attribute'),
- (words((
- '#eval', '#check', '#reduce', '#exit',
- '#print', '#help',
- ), suffix=r'\b'), Keyword),
- include('expression')
- ],
- 'attribute': [
- (r'\]', Keyword.Declaration, '#pop'),
- include('expression'),
- ],
- 'comment': [
- (r'[^/-]', Comment.Multiline),
- (r'/-', Comment.Multiline, '#push'),
- (r'-/', Comment.Multiline, '#pop'),
- (r'[/-]', Comment.Multiline)
- ],
- 'docstring': [
- (r'[^/-]', String.Doc),
- (r'-/', String.Doc, '#pop'),
- (r'[/-]', String.Doc)
- ],
- 'string': [
- (r'[^\\"]+', String.Double),
- (r"(?:(\\[\\\"'nt])|(\\x[0-9a-fA-F]{2})|(\\u[0-9a-fA-F]{4}))", String.Escape),
- ('"', String.Double, '#pop'),
- ],
- }
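The two classes removed above are vendored copies of Pygments' theorem-prover lexers. For context, a minimal sketch of how such lexers are normally driven through the public Pygments API (assuming a stock `pygments` install where the `isabelle` and `lean` aliases declared above are registered):

```python
# Minimal sketch: highlighting Isabelle and Lean snippets via the standard
# Pygments API. Lexers are looked up by the aliases declared in the code above.
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name

isabelle_src = 'theory Scratch imports Main begin lemma "A ==> A" by assumption end'
lean_src = 'theorem trivial (p : Prop) (h : p) : p := h'

# Each lexer yields (token_type, text) pairs; the formatter turns them into markup.
print(highlight(isabelle_src, get_lexer_by_name("isabelle"), HtmlFormatter()))
print(highlight(lean_src, get_lexer_by_name("lean"), HtmlFormatter()))
```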
diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/requests/certs.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/requests/certs.py
deleted file mode 100644
index be422c3e91e43bacf60ff3302688df0b28742333..0000000000000000000000000000000000000000
--- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/requests/certs.py
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env python
-
-"""
-requests.certs
-~~~~~~~~~~~~~~
-
-This module returns the preferred default CA certificate bundle. There is
-only one — the one from the certifi package.
-
-If you are packaging Requests, e.g., for a Linux distribution or a managed
-environment, you can change the definition of where() to return a separately
-packaged CA bundle.
-"""
-from certifi import where
-
-if __name__ == "__main__":
- print(where())
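The deleted `certs.py` simply re-exports `certifi.where()`. A short sketch of what that resolves to and how the same bundle can be passed explicitly to a request (the example URL is only a placeholder):

```python
# requests.certs.where() is simply certifi.where(): the filesystem path of the
# bundled CA certificate file.
import certifi
import requests

print(certifi.where())         # e.g. .../site-packages/certifi/cacert.pem
print(requests.certs.where())  # same path, re-exported by the module above

# A repackager overriding where() is equivalent to pointing verify at another bundle:
resp = requests.get("https://example.com", verify=certifi.where())
print(resp.status_code)
```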
diff --git a/spaces/qdd319/ChuanhuChatGPT/assets/custom.css b/spaces/qdd319/ChuanhuChatGPT/assets/custom.css
deleted file mode 100644
index f98c7df263b11afa4ddfb5d6ed18aef2ef234226..0000000000000000000000000000000000000000
--- a/spaces/qdd319/ChuanhuChatGPT/assets/custom.css
+++ /dev/null
@@ -1,250 +0,0 @@
-:root {
- --chatbot-color-light: #F3F3F3;
- --chatbot-color-dark: #121111;
-}
-
-/* override Gradio's footer info QAQ */
-footer {
- display: none !important;
-}
-#footer{
- text-align: center;
-}
-#footer div{
- display: inline-block;
-}
-#footer .versions{
- font-size: 85%;
- opacity: 0.85;
-}
-
-/* user_info */
-#user_info {
- white-space: nowrap;
- margin-top: -1.3em !important;
- padding-left: 112px !important;
-}
-#user_info p {
- font-size: .85em;
- font-family: monospace;
- color: var(--body-text-color-subdued);
-}
-
-/* status_display */
-#status_display {
- display: flex;
- min-height: 2em;
- align-items: flex-end;
- justify-content: flex-end;
-}
-#status_display p {
- font-size: .85em;
- font-family: monospace;
- color: var(--body-text-color-subdued);
-}
-
-#chuanhu_chatbot, #status_display {
- transition: all 0.6s;
-}
-
-/* usage_display */
-#usage_display {
- position: relative;
- margin: 0;
- box-shadow: var(--block-shadow);
- border-width: var(--block-border-width);
- border-color: var(--block-border-color);
- border-radius: var(--block-radius);
- background: var(--block-background-fill);
- width: 100%;
- line-height: var(--line-sm);
- min-height: 2em;
-}
-#usage_display p, #usage_display span {
- margin: 0;
- padding: .5em 1em;
- font-size: .85em;
- color: var(--body-text-color-subdued);
-}
-.progress-bar {
- background-color: var(--input-background-fill);
- margin: 0 1em;
- height: 20px;
- border-radius: 10px;
- overflow: hidden;
-}
-.progress {
- background-color: var(--block-title-background-fill);
- height: 100%;
- border-radius: 10px;
- text-align: right;
- transition: width 0.5s ease-in-out;
-}
-.progress-text {
- /* color: white; */
- color: var(--color-accent) !important;
- font-size: 1em !important;
- font-weight: bold;
- padding-right: 10px;
- line-height: 20px;
-}
-/* list */
-ol:not(.options), ul:not(.options) {
- padding-inline-start: 2em !important;
-}
-
-/* light theme */
-@media (prefers-color-scheme: light) {
- #chuanhu_chatbot {
- background-color: var(--chatbot-color-light) !important;
- color: #000000 !important;
- }
- [data-testid = "bot"] {
- background-color: #FFFFFF !important;
- }
- [data-testid = "user"] {
- background-color: #95EC69 !important;
- }
-}
-/* dark theme */
-@media (prefers-color-scheme: dark) {
- #chuanhu_chatbot {
- background-color: var(--chatbot-color-dark) !important;
- color: #FFFFFF !important;
- }
- [data-testid = "bot"] {
- background-color: #2C2C2C !important;
- }
- [data-testid = "user"] {
- background-color: #26B561 !important;
- }
- body {
- background-color: var(--neutral-950) !important;
- }
-}
-/* chat bubbles */
-[class *= "message"] {
- border-radius: var(--radius-xl) !important;
- border: none;
- padding: var(--spacing-xl) !important;
- font-size: var(--text-md) !important;
- line-height: var(--line-md) !important;
- min-height: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
- min-width: calc(var(--text-md)*var(--line-md) + 2*var(--spacing-xl));
-}
-[data-testid = "bot"] {
- max-width: 85%;
- border-bottom-left-radius: 0 !important;
-}
-[data-testid = "user"] {
- max-width: 85%;
- width: auto !important;
- border-bottom-right-radius: 0 !important;
-}
-/* tables */
-table {
- margin: 1em 0;
- border-collapse: collapse;
- empty-cells: show;
-}
-td,th {
- border: 1.2px solid var(--border-color-primary) !important;
- padding: 0.2em;
-}
-thead {
- background-color: rgba(175,184,193,0.2);
-}
-thead th {
- padding: .5em .2em;
-}
-/* inline code */
-code {
- display: inline;
- white-space: break-spaces;
- border-radius: 6px;
- margin: 0 2px 0 2px;
- padding: .2em .4em .1em .4em;
- background-color: rgba(175,184,193,0.2);
-}
-/* code blocks */
-pre code {
- display: block;
- overflow: auto;
- white-space: pre;
- background-color: hsla(0, 0%, 0%, 80%)!important;
- border-radius: 10px;
- padding: 1.4em 1.2em 0em 1.4em;
- margin: 1.2em 2em 1.2em 0.5em;
- color: #FFF;
- box-shadow: 6px 6px 16px hsla(0, 0%, 0%, 0.2);
-}
-/* syntax highlighting styles */
-.highlight .hll { background-color: #49483e }
-.highlight .c { color: #75715e } /* Comment */
-.highlight .err { color: #960050; background-color: #1e0010 } /* Error */
-.highlight .k { color: #66d9ef } /* Keyword */
-.highlight .l { color: #ae81ff } /* Literal */
-.highlight .n { color: #f8f8f2 } /* Name */
-.highlight .o { color: #f92672 } /* Operator */
-.highlight .p { color: #f8f8f2 } /* Punctuation */
-.highlight .ch { color: #75715e } /* Comment.Hashbang */
-.highlight .cm { color: #75715e } /* Comment.Multiline */
-.highlight .cp { color: #75715e } /* Comment.Preproc */
-.highlight .cpf { color: #75715e } /* Comment.PreprocFile */
-.highlight .c1 { color: #75715e } /* Comment.Single */
-.highlight .cs { color: #75715e } /* Comment.Special */
-.highlight .gd { color: #f92672 } /* Generic.Deleted */
-.highlight .ge { font-style: italic } /* Generic.Emph */
-.highlight .gi { color: #a6e22e } /* Generic.Inserted */
-.highlight .gs { font-weight: bold } /* Generic.Strong */
-.highlight .gu { color: #75715e } /* Generic.Subheading */
-.highlight .kc { color: #66d9ef } /* Keyword.Constant */
-.highlight .kd { color: #66d9ef } /* Keyword.Declaration */
-.highlight .kn { color: #f92672 } /* Keyword.Namespace */
-.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
-.highlight .kr { color: #66d9ef } /* Keyword.Reserved */
-.highlight .kt { color: #66d9ef } /* Keyword.Type */
-.highlight .ld { color: #e6db74 } /* Literal.Date */
-.highlight .m { color: #ae81ff } /* Literal.Number */
-.highlight .s { color: #e6db74 } /* Literal.String */
-.highlight .na { color: #a6e22e } /* Name.Attribute */
-.highlight .nb { color: #f8f8f2 } /* Name.Builtin */
-.highlight .nc { color: #a6e22e } /* Name.Class */
-.highlight .no { color: #66d9ef } /* Name.Constant */
-.highlight .nd { color: #a6e22e } /* Name.Decorator */
-.highlight .ni { color: #f8f8f2 } /* Name.Entity */
-.highlight .ne { color: #a6e22e } /* Name.Exception */
-.highlight .nf { color: #a6e22e } /* Name.Function */
-.highlight .nl { color: #f8f8f2 } /* Name.Label */
-.highlight .nn { color: #f8f8f2 } /* Name.Namespace */
-.highlight .nx { color: #a6e22e } /* Name.Other */
-.highlight .py { color: #f8f8f2 } /* Name.Property */
-.highlight .nt { color: #f92672 } /* Name.Tag */
-.highlight .nv { color: #f8f8f2 } /* Name.Variable */
-.highlight .ow { color: #f92672 } /* Operator.Word */
-.highlight .w { color: #f8f8f2 } /* Text.Whitespace */
-.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
-.highlight .mf { color: #ae81ff } /* Literal.Number.Float */
-.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
-.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
-.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
-.highlight .sa { color: #e6db74 } /* Literal.String.Affix */
-.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
-.highlight .sc { color: #e6db74 } /* Literal.String.Char */
-.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
-.highlight .sd { color: #e6db74 } /* Literal.String.Doc */
-.highlight .s2 { color: #e6db74 } /* Literal.String.Double */
-.highlight .se { color: #ae81ff } /* Literal.String.Escape */
-.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
-.highlight .si { color: #e6db74 } /* Literal.String.Interpol */
-.highlight .sx { color: #e6db74 } /* Literal.String.Other */
-.highlight .sr { color: #e6db74 } /* Literal.String.Regex */
-.highlight .s1 { color: #e6db74 } /* Literal.String.Single */
-.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
-.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
-.highlight .fm { color: #a6e22e } /* Name.Function.Magic */
-.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
-.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
-.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
-.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
-.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
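The stylesheet above targets Gradio element ids such as `#chuanhu_chatbot` and `#status_display`. As a hedged sketch of how a file like this is typically wired into a Gradio app (the element ids are taken from the CSS itself; the real ChuanhuChatGPT wiring may differ):

```python
# A minimal sketch of attaching a custom stylesheet like the one above to a
# Gradio Blocks app. The element ids come from the CSS; the actual app may differ.
import gradio as gr

with open("assets/custom.css", encoding="utf-8") as f:
    custom_css = f.read()

with gr.Blocks(css=custom_css) as demo:
    chatbot = gr.Chatbot(elem_id="chuanhu_chatbot")          # styled by the #chuanhu_chatbot rules
    status = gr.Markdown("Ready.", elem_id="status_display")  # styled by the #status_display rules

demo.launch()
```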
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Logicieldepiratagedecompteimvu.md b/spaces/quidiaMuxgu/Expedit-SAM/Logicieldepiratagedecompteimvu.md
deleted file mode 100644
index 42c9bc3720e2f11d4fe23023ff17b741d88aeebf..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/Logicieldepiratagedecompteimvu.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Crack No Cd De Age Of Pirates 2 City Of Abandoned Ships · Logicieldepiratagedecompteimvu · Pes 2013 Registry 64 Bit · Canalis Hydra 12 1fdad05405
-
-
-
diff --git a/spaces/quidiaMuxgu/Expedit-SAM/MaxSea V12.6.4.1 With CM93v3 Issue505 Professional.rar.md b/spaces/quidiaMuxgu/Expedit-SAM/MaxSea V12.6.4.1 With CM93v3 Issue505 Professional.rar.md
deleted file mode 100644
index 4fb1e0c0b3380111911ae5575fd4a4aa3a269da2..0000000000000000000000000000000000000000
--- a/spaces/quidiaMuxgu/Expedit-SAM/MaxSea V12.6.4.1 With CM93v3 Issue505 Professional.rar.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
MaxSea v12.6.4.1 with CM93v3 Issue505 Professional.rar
What is Autodesk AutoCAD Architecture 2020 and How to Use It?
-
Autodesk AutoCAD Architecture 2020 is a software application that helps architects and engineers design and document buildings more efficiently. It is a specialized toolset that is included with AutoCAD, the most popular software for 2D and 3D CAD. With AutoCAD Architecture 2020, you can create floor plans, sections, elevations, schedules, and 3D views of your architectural projects using industry-standard symbols and components.
In this article, we will show you how to use some of the features of AutoCAD Architecture 2020 to create a simple building design. You will learn how to:
-
- Set up your drawing environment with architectural units and styles
- Draw walls, doors, windows, and other building elements
- Add dimensions, annotations, and tags to your drawings
- Create sheets and layouts for printing and plotting
-
Before you start, you will need to download and install AutoCAD Architecture 2020 from the Autodesk website. You can get a free trial for 30 days or purchase a subscription plan that suits your needs. You will also need a computer that meets the system requirements for running the software.
-
Setting Up Your Drawing Environment
-
The first step in using AutoCAD Architecture 2020 is to set up your drawing environment with the appropriate units and styles. This will ensure that your drawings are accurate and consistent with the standards of your industry.
-
To set up your drawing environment, follow these steps:
-
1. Open AutoCAD Architecture 2020 and create a new drawing.
2. On the status bar, click on the Units icon and select Architectural from the drop-down menu. This will set your drawing units to feet and inches.
3. On the ribbon, click on the Home tab and then click on the Style Manager icon in the Manage panel. This will open the Style Manager dialog box.
4. In the Style Manager dialog box, expand Architectural Objects and select Wall Styles. You will see a list of predefined wall styles that you can use or modify for your project.
5. Select a wall style that matches your requirements and click on Modify. This will open the Wall Style dialog box.
6. In the Wall Style dialog box, you can change the properties of the wall style, such as its name, description, layer, materials, components, endcaps, cleanups, and display options.
7. Click on OK to save your changes and close the Wall Style dialog box.
8. Repeat steps 4 to 7 for other architectural objects, such as door styles, window styles, roof styles, etc.
9. Click on OK to close the Style Manager dialog box.
-
You have now set up your drawing environment with architectural units and styles. You are ready to start drawing your building design.
-
Drawing Walls, Doors, Windows, and Other Building Elements
-
The next step in using AutoCAD Architecture 2020 is to draw walls, doors, windows, and other building elements using the tools on the ribbon. You can use different methods to draw walls, such as straight walls, curved walls, polygonal walls, etc. You can also insert doors and windows into walls automatically or manually. You can also add other building elements, such as stairs, railings, roofs, etc.
-
To draw walls, doors, windows, and other building elements, follow these steps:
-
1. On the ribbon, click on the Home tab and then click on the Wall tool in the Build panel. This will activate the Wall command.
2. On the command line or dynamic input tooltip, specify a start point for the wall.
3. Move your cursor to specify a direction and length for the wall segment. You can also enter a distance or angle value on the command line or dynamic input tooltip.
4. Press Enter or right-click to end the wall segment.
5. Repeat steps 2 to 4 to draw more wall segments. You can also use options on the command line or dynamic input tooltip to change wall properties or methods (a scripted variant is sketched just after this article). cec2833e83
-
\ No newline at end of file
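The walkthrough above is entirely GUI-driven. Purely as an illustration, and not something the article describes, similar geometry can be sketched programmatically over AutoCAD's COM interface with the third-party `pyautocad` package (assumed installed; requires Windows and a running AutoCAD instance, and it draws plain lines rather than true AEC wall objects):

```python
# Hedged illustration only: driving AutoCAD over COM with the third-party
# `pyautocad` package (an assumption, not part of the article). Plain lines
# stand in for the wall segments drawn interactively above.
from pyautocad import Autocad, APoint

acad = Autocad(create_if_not_exists=True)
start = APoint(0, 0, 0)
for dx, dy in [(6000, 0), (0, 4000), (-6000, 0), (0, -4000)]:  # mm, a 6 m x 4 m room
    end = APoint(start.x + dx, start.y + dy, 0)
    acad.model.AddLine(start, end)  # add one segment to model space
    start = end
print("Drew 4 line segments in:", acad.doc.Name)
```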
diff --git a/spaces/ramiin2/AutoGPT/autogpt/__main__.py b/spaces/ramiin2/AutoGPT/autogpt/__main__.py
deleted file mode 100644
index 128f9eea4900429e88276abdde3419b806001ac7..0000000000000000000000000000000000000000
--- a/spaces/ramiin2/AutoGPT/autogpt/__main__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Auto-GPT: A GPT powered AI Assistant"""
-import autogpt.cli
-
-if __name__ == "__main__":
- autogpt.cli.main()
diff --git a/spaces/ramiin2/AutoGPT/tests/unit/json_tests.py b/spaces/ramiin2/AutoGPT/tests/unit/json_tests.py
deleted file mode 100644
index 25c383377708359b5cfec28e0625343c5692f15c..0000000000000000000000000000000000000000
--- a/spaces/ramiin2/AutoGPT/tests/unit/json_tests.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import unittest
-
-from autogpt.json_utils.json_fix_llm import fix_and_parse_json
-
-
-class TestParseJson(unittest.TestCase):
- def test_valid_json(self):
- # Test that a valid JSON string is parsed correctly
- json_str = '{"name": "John", "age": 30, "city": "New York"}'
- obj = fix_and_parse_json(json_str)
- self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})
-
- def test_invalid_json_minor(self):
- # Test that a minor error (trailing comma) is fixed without needing GPT
- json_str = '{"name": "John", "age": 30, "city": "New York",}'
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False),
- {"name": "John", "age": 30, "city": "New York"},
- )
-
- def test_invalid_json_major_with_gpt(self):
- # Test that a badly malformed JSON string is recovered when try_to_fix_with_gpt is True
- json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=True),
- {"name": "John", "age": 30, "city": "New York"},
- )
-
- def test_invalid_json_major_without_gpt(self):
- # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
- json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
- # Assert that this raises an exception:
- with self.assertRaises(Exception):
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False)
-
- def test_invalid_json_leading_sentence_with_gpt(self):
- # Test that JSON preceded by a leading sentence is still extracted and parsed without GPT
- json_str = """I suggest we start by browsing the repository to find any issues that we can fix.
-
-{
- "command": {
- "name": "browse_website",
- "args":{
- "url": "https://github.com/Torantulino/Auto-GPT"
- }
- },
- "thoughts":
- {
- "text": "I suggest we start browsing the repository to find any issues that we can fix.",
- "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
- "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
- "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
- "speak": "I will start browsing the repository to find any issues we can fix."
- }
-}"""
- good_obj = {
- "command": {
- "name": "browse_website",
- "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
- },
- "thoughts": {
- "text": "I suggest we start browsing the repository to find any issues that we can fix.",
- "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
- "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
- "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
- "speak": "I will start browsing the repository to find any issues we can fix.",
- },
- }
- # Assert that the embedded JSON object is recovered:
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
- )
-
- def test_invalid_json_leading_sentence_with_gpt_2(self):
- # Test a second leading-sentence payload; renamed so it no longer shadows the test above
- json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.
-
-{
- "command": {
- "name": "browse_website",
- "args":{
- "url": "https://github.com/Torantulino/Auto-GPT"
- }
- },
- "thoughts":
- {
- "text": "Browsing the repository to identify potential bugs",
- "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
- "plan": "- Analyze the repository for potential bugs and areas of improvement",
- "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
- "speak": "I am browsing the repository to identify potential bugs."
- }
-}"""
- good_obj = {
- "command": {
- "name": "browse_website",
- "args": {"url": "https://github.com/Torantulino/Auto-GPT"},
- },
- "thoughts": {
- "text": "Browsing the repository to identify potential bugs",
- "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
- "plan": "- Analyze the repository for potential bugs and areas of improvement",
- "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
- "speak": "I am browsing the repository to identify potential bugs.",
- },
- }
- # Assert that the embedded JSON object is recovered:
- self.assertEqual(
- fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj
- )
-
-
-if __name__ == "__main__":
- unittest.main()
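The tests above exercise AutoGPT's `fix_and_parse_json`. As a rough stdlib-only illustration of the two non-GPT repairs they expect (not the project's actual implementation):

```python
# Rough illustration of the behaviour the tests above expect when
# try_to_fix_with_gpt=False: strip any leading sentence before the first '{'
# and remove trailing commas. This is NOT AutoGPT's real implementation.
import json
import re


def naive_fix_and_parse_json(text: str):
    start, end = text.find("{"), text.rfind("}")
    if start == -1 or end == -1:
        raise ValueError("no JSON object found")
    candidate = text[start:end + 1]
    candidate = re.sub(r",\s*([}\]])", r"\1", candidate)  # drop trailing commas
    return json.loads(candidate)


print(naive_fix_and_parse_json('{"name": "John", "age": 30, "city": "New York",}'))
```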
diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Autodata 3 40 English Crack Zip [PATCHED].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Autodata 3 40 English Crack Zip [PATCHED].md
deleted file mode 100644
index 9a35f64e6f3194f5b8f206ef0cd37544c6cbdfa6..0000000000000000000000000000000000000000
--- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Autodata 3 40 English Crack Zip [PATCHED].md
+++ /dev/null
@@ -1,154 +0,0 @@
-
-
Autodata 3.40 English Crack Zip: How to Download and Use
-
-
If you are looking for a reliable and comprehensive software for car repair and diagnostics, you might want to check out Autodata 3.40. This software provides you with detailed information and instructions on various aspects of vehicle service and maintenance. You can access data on wiring diagrams, technical specifications, service schedules, repair times, fault codes, etc.
However, Autodata 3.40 is not a free software. You need to purchase a subscription to use it. But what if you don't have the budget or you just want to try it out before buying? Well, there is a way to get Autodata 3.40 for free by using an English crack zip file.
-
-
What is an English crack zip file?
-
-
An English crack zip file is a file that contains a cracked version of Autodata 3.40 and an English language pack. A cracked version of Autodata 3.40 is a modified version of the software that bypasses the activation process and allows you to use it without a subscription. An English language pack is a file that changes the default language of Autodata 3.40 from German to English.
-
-
By using an English crack zip file, you can download and install Autodata 3.40 on your PC and use it in English for free.
-
-
How to use an English crack zip file?
-
-
To use an English crack zip file, you need to follow these steps:
-
-
-
1. Download an English crack zip file for Autodata 3.40 from a reliable source. You can search for it on Google or use one of the links below:
2. Open the English crack zip file with WinRAR, 7-Zip or any other file compression tool.
3. Extract the contents of the English crack zip file to a folder on your PC.
4. Run the install.cmd file and wait until the installation process is completed.
5. Run the crack.bat file as administrator and wait until the cracking process is done.
6. Go to the Windows 7_64 folder and run all the registry files.
7. Go to the C:\\ADCDA2 folder and run the ADBCD.exe file.
8. Enjoy using Autodata 3.40 in English for free.
-
-
-
Conclusion
-
-
Autodata 3.40 is a software that provides you with detailed information and instructions on car repair and diagnostics. However, it is not a free software and you need to purchase a subscription to use it.
-
-
If you want to try Autodata 3.40 for free without paying anything, you can use an English crack zip file to download and install it on your PC and use it in English.
-
-
-
However, you should be careful when using an English crack zip file as it may contain viruses or malware that can harm your PC or compromise your data. You should always scan the English crack zip file with an antivirus program before opening it and only download it from trusted sources.
-
-
We hope this article has helped you understand how to use an English crack zip file for Autodata 3.40 and enjoy its benefits without paying anything.
-
What are the advantages and disadvantages of using Autodata 3.40 English crack zip?
-
-
Using Autodata 3.40 English crack zip has some advantages and disadvantages that you should be aware of before deciding to use it.
-
-
Some of the advantages are:
-
-
-
- Free access: By using Autodata 3.40 English crack zip, you can access all the features and functions of Autodata 3.40 without paying anything. You can save money and enjoy the benefits of Autodata 3.40 for free.
- Easy installation: By using Autodata 3.40 English crack zip, you can install Autodata 3.40 on your PC easily and quickly. You don't need to go through a complicated activation process or enter any license key. You just need to follow some simple steps and run some files.
- English language: By using Autodata 3.40 English crack zip, you can use Autodata 3.40 in English. You don't need to deal with the default German language or look for a separate language pack. You can understand and use Autodata 3.40 better in English.
-
-
-
Some of the disadvantages are:
-
-
-
- Illegal activity: By using Autodata 3.40 English crack zip, you are violating the terms and conditions of Autodata 3.40 and infringing its intellectual property rights. You are also breaking the law and risking legal consequences such as fines or lawsuits.
- Potential risks: By using Autodata 3.40 English crack zip, you are exposing your PC and data to potential risks such as viruses or malware. You don't know what the crack zip file contains or what it does to your PC. You may damage your PC or lose your data by using Autodata 3.40 English crack zip.
- No support or updates: By using Autodata 3.40 English crack zip, you are not eligible for any support or updates from Autodata 3.40. You cannot contact the technical support team or access the online help if you have any questions or issues regarding Autodata 3.40. You also cannot get any updates or patches that may fix bugs or improve performance of Autodata 3.40.
-
-
-
Conclusion
-
-
Autodata 3.40 is a software that provides you with detailed information and instructions on car repair and diagnostics. However, it is not a free software and you need to purchase a subscription to use it.
-
-
If you want to try Autodata 3.40 for free without paying anything, you can use an English crack zip file to download and install it on your PC and use it in English.
-
-
However, you should be careful when using an English crack zip file as it may contain viruses or malware that can harm your PC or compromise your data. You should always scan the English crack zip file with an antivirus program before opening it and only download it from trusted sources.
-
-
You should also be aware of the advantages and disadvantages of using Autodata 3.40 English crack zip and decide whether it is worth it or not.
-
-
We hope this article has helped you understand how to use an English crack zip file for Autodata 3.40 and enjoy its benefits without paying anything.
-
How to update Autodata 3.40 English crack zip?
-
-
If you are using Autodata 3.40 English crack zip, you may wonder how to update it to get the latest data and features of Autodata 3.40. Unfortunately, there is no official way to update Autodata 3.40 English crack zip as it is an illegal and unofficial version of the software.
-
-
However, there are some unofficial ways to update Autodata 3.40 English crack zip that you can try at your own risk. These methods involve downloading and installing new crack zip files or patches that may contain updated data and features of Autodata 3.40.
-
-
Some of the sources where you can find these updates are:
However, you should be careful when using these updates as they may not work properly or they may contain viruses or malware that can harm your PC or compromise your data. You should always scan the update files with an antivirus program before opening them and only download them from trusted sources.
-
-
You should also backup your existing Autodata 3.40 English crack zip files and data before applying any updates as they may overwrite or delete them.
-
-
Conclusion
-
-
Autodata 3.40 is a software that provides you with detailed information and instructions on car repair and diagnostics. However, it is not a free software and you need to purchase a subscription to use it.
-
-
If you want to try Autodata 3.40 for free without paying anything, you can use an English crack zip file to download and install it on your PC and use it in English.
-
-
However, you should be careful when using an English crack zip file as it may contain viruses or malware that can harm your PC or compromise your data. You should always scan the English crack zip file with an antivirus program before opening it and only download it from trusted sources.
-
-
You should also be aware of the advantages and disadvantages of using Autodata 3.40 English crack zip and decide whether it is worth it or not.
-
-
If you want to update Autodata 3.40 English crack zip to get the latest data and features of Autodata 3.40, you can try some unofficial ways to do so at your own risk. However, you should be careful when using these updates as they may not work properly or they may contain viruses or malware that can harm your PC or compromise your data. You should always scan the update files with an antivirus program before opening them and only download them from trusted sources.
-
-
You should also backup your existing Autodata 3.40 English crack zip files and data before applying any updates as they may overwrite or delete them.
-
-
We hope this article has helped you understand how to use an English crack zip file for Autodata 3.40 and enjoy its benefits without paying anything.
-
How to use Autodata 3.40 English crack zip for car repair and diagnostics?
-
-
Once you have installed Autodata 3.40 English crack zip on your PC, you can use it for various purposes related to car repair and diagnostics. Here are some of the things you can do with Autodata 3.40 English crack zip:
-
-
-
- Access wiring diagrams: Autodata 3.40 English crack zip provides you with wiring diagrams for various car models and systems. You can use these diagrams to understand how the electrical components of a car are connected and how they work. You can also use these diagrams to troubleshoot or repair any electrical problems in a car.
- Access technical specifications: Autodata 3.40 English crack zip provides you with technical specifications for various car models and parts. You can use these specifications to know the dimensions, capacities, tolerances, pressures, etc. of a car or its components. You can also use these specifications to compare different cars or parts and choose the best ones for your needs.
- Access service schedules: Autodata 3.40 English crack zip provides you with service schedules for various car models and systems. You can use these schedules to know when and how to perform routine maintenance and service on a car. You can also use these schedules to avoid any potential problems or failures in a car.
- Access repair times: Autodata 3.40 English crack zip provides you with repair times for various car models and operations. You can use these times to estimate how long it will take to repair or replace a part or a system in a car. You can also use these times to plan your work and budget accordingly.
- Access fault codes: Autodata 3.40 English crack zip provides you with fault codes for various car models and systems. You can use these codes to identify and diagnose any errors or malfunctions in a car. You can also use these codes to clear or reset any warning lights or indicators in a car.
-
-
-
Conclusion
-
-
Autodata 3.40 is a software that provides you with detailed information and instructions on car repair and diagnostics. However, it is not a free software and you need to purchase a subscription to use it.
-
-
If you want to try Autodata 3.40 for free without paying anything, you can use an English crack zip file to download and install it on your PC and use it in English.
-
-
However, you should be careful when using an English crack zip file as it may contain viruses or malware that can harm your PC or compromise your data. You should always scan the English crack zip file with an antivirus program before opening it and only download it from trusted sources.
-
-
You should also be aware of the advantages and disadvantages of using Autodata 3.40 English crack zip and decide whether it is worth it or not.
-
-
If you want to update Autodata 3.40 English crack zip to get the latest data and features of Autodata 3.40, you can try some unofficial ways to do so at your own risk. However, you should be careful when using these updates as they may not work properly or they may contain viruses or malware that can harm your PC or compromise your data. You should always scan the update files with an antivirus program before opening them and only download them from trusted sources.
-
-
You should also backup your existing Autodata 3.40 English crack zip files and data before applying any updates as they may overwrite or delete them.
-
-
If you want to use Autodata 3.40 English crack zip for car repair and diagnostics, you can do various things with it such as accessing wiring diagrams, technical specifications, service schedules, repair times, fault codes, etc.
-
-
We hope this article has helped you understand how to use an English crack zip file for Autodata 3.40 and enjoy its benefits without paying anything.
-
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/renatotn7/teste2/gfpgan/archs/gfpgan_bilinear_arch.py b/spaces/renatotn7/teste2/gfpgan/archs/gfpgan_bilinear_arch.py
deleted file mode 100644
index 52e0de88de8543cf4afdc3988c4cdfc7c7060687..0000000000000000000000000000000000000000
--- a/spaces/renatotn7/teste2/gfpgan/archs/gfpgan_bilinear_arch.py
+++ /dev/null
@@ -1,312 +0,0 @@
-import math
-import random
-import torch
-from basicsr.utils.registry import ARCH_REGISTRY
-from torch import nn
-
-from .gfpganv1_arch import ResUpBlock
-from .stylegan2_bilinear_arch import (ConvLayer, EqualConv2d, EqualLinear, ResBlock, ScaledLeakyReLU,
- StyleGAN2GeneratorBilinear)
-
-
-class StyleGAN2GeneratorBilinearSFT(StyleGAN2GeneratorBilinear):
- """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform).
-
- It is the bilinear version. It does not use the complicated UpFirDnSmooth function that is not friendly for
- deployment. It can be easily converted to the clean version: StyleGAN2GeneratorCSFT.
-
- Args:
- out_size (int): The spatial size of outputs.
- num_style_feat (int): Channel number of style features. Default: 512.
- num_mlp (int): Layer number of MLP style layers. Default: 8.
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
- lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.
- narrow (float): The narrow ratio for channels. Default: 1.
- sft_half (bool): Whether to apply SFT on half of the input channels. Default: False.
- """
-
- def __init__(self,
- out_size,
- num_style_feat=512,
- num_mlp=8,
- channel_multiplier=2,
- lr_mlp=0.01,
- narrow=1,
- sft_half=False):
- super(StyleGAN2GeneratorBilinearSFT, self).__init__(
- out_size,
- num_style_feat=num_style_feat,
- num_mlp=num_mlp,
- channel_multiplier=channel_multiplier,
- lr_mlp=lr_mlp,
- narrow=narrow)
- self.sft_half = sft_half
-
- def forward(self,
- styles,
- conditions,
- input_is_latent=False,
- noise=None,
- randomize_noise=True,
- truncation=1,
- truncation_latent=None,
- inject_index=None,
- return_latents=False):
- """Forward function for StyleGAN2GeneratorBilinearSFT.
-
- Args:
- styles (list[Tensor]): Sample codes of styles.
- conditions (list[Tensor]): SFT conditions to generators.
- input_is_latent (bool): Whether input is latent style. Default: False.
- noise (Tensor | None): Input noise or None. Default: None.
- randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
- truncation (float): The truncation ratio. Default: 1.
- truncation_latent (Tensor | None): The truncation latent tensor. Default: None.
- inject_index (int | None): The injection index for mixing noise. Default: None.
- return_latents (bool): Whether to return style latents. Default: False.
- """
- # style codes -> latents with Style MLP layer
- if not input_is_latent:
- styles = [self.style_mlp(s) for s in styles]
- # noises
- if noise is None:
- if randomize_noise:
- noise = [None] * self.num_layers # for each style conv layer
- else: # use the stored noise
- noise = [getattr(self.noises, f'noise{i}') for i in range(self.num_layers)]
- # style truncation
- if truncation < 1:
- style_truncation = []
- for style in styles:
- style_truncation.append(truncation_latent + truncation * (style - truncation_latent))
- styles = style_truncation
- # get style latents with injection
- if len(styles) == 1:
- inject_index = self.num_latent
-
- if styles[0].ndim < 3:
- # repeat latent code for all the layers
- latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- else: # used for encoder with different latent code for each layer
- latent = styles[0]
- elif len(styles) == 2: # mixing noises
- if inject_index is None:
- inject_index = random.randint(1, self.num_latent - 1)
- latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
- latent2 = styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)
- latent = torch.cat([latent1, latent2], 1)
-
- # main generation
- out = self.constant_input(latent.shape[0])
- out = self.style_conv1(out, latent[:, 0], noise=noise[0])
- skip = self.to_rgb1(out, latent[:, 1])
-
- i = 1
- for conv1, conv2, noise1, noise2, to_rgb in zip(self.style_convs[::2], self.style_convs[1::2], noise[1::2],
- noise[2::2], self.to_rgbs):
- out = conv1(out, latent[:, i], noise=noise1)
-
- # the conditions may have fewer levels
- if i < len(conditions):
- # SFT part to combine the conditions
- if self.sft_half: # only apply SFT to half of the channels
- out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1)
- out_sft = out_sft * conditions[i - 1] + conditions[i]
- out = torch.cat([out_same, out_sft], dim=1)
- else: # apply SFT to all the channels
- out = out * conditions[i - 1] + conditions[i]
-
- out = conv2(out, latent[:, i + 1], noise=noise2)
- skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space
- i += 2
-
- image = skip
-
- if return_latents:
- return image, latent
- else:
- return image, None
-
-
-@ARCH_REGISTRY.register()
-class GFPGANBilinear(nn.Module):
- """The GFPGAN architecture: Unet + StyleGAN2 decoder with SFT.
-
- It is the bilinear version and it does not use the complicated UpFirDnSmooth function that is not friendly for
- deployment. It can be easily converted to the clean version: GFPGANv1Clean.
-
-
- Ref: GFP-GAN: Towards Real-World Blind Face Restoration with Generative Facial Prior.
-
- Args:
- out_size (int): The spatial size of outputs.
- num_style_feat (int): Channel number of style features. Default: 512.
- channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2.
- decoder_load_path (str): The path to the pre-trained decoder model (usually, the StyleGAN2). Default: None.
- fix_decoder (bool): Whether to fix the decoder. Default: True.
-
- num_mlp (int): Layer number of MLP style layers. Default: 8.
- lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.
- input_is_latent (bool): Whether input is latent style. Default: False.
- different_w (bool): Whether to use different latent w for different layers. Default: False.
- narrow (float): The narrow ratio for channels. Default: 1.
- sft_half (bool): Whether to apply SFT on half of the input channels. Default: False.
- """
-
- def __init__(
- self,
- out_size,
- num_style_feat=512,
- channel_multiplier=1,
- decoder_load_path=None,
- fix_decoder=True,
- # for stylegan decoder
- num_mlp=8,
- lr_mlp=0.01,
- input_is_latent=False,
- different_w=False,
- narrow=1,
- sft_half=False):
-
- super(GFPGANBilinear, self).__init__()
- self.input_is_latent = input_is_latent
- self.different_w = different_w
- self.num_style_feat = num_style_feat
-
- unet_narrow = narrow * 0.5 # by default, use a half of input channels
- channels = {
- '4': int(512 * unet_narrow),
- '8': int(512 * unet_narrow),
- '16': int(512 * unet_narrow),
- '32': int(512 * unet_narrow),
- '64': int(256 * channel_multiplier * unet_narrow),
- '128': int(128 * channel_multiplier * unet_narrow),
- '256': int(64 * channel_multiplier * unet_narrow),
- '512': int(32 * channel_multiplier * unet_narrow),
- '1024': int(16 * channel_multiplier * unet_narrow)
- }
-
- self.log_size = int(math.log(out_size, 2))
- first_out_size = 2**(int(math.log(out_size, 2)))
-
- self.conv_body_first = ConvLayer(3, channels[f'{first_out_size}'], 1, bias=True, activate=True)
-
- # downsample
- in_channels = channels[f'{first_out_size}']
- self.conv_body_down = nn.ModuleList()
- for i in range(self.log_size, 2, -1):
- out_channels = channels[f'{2**(i - 1)}']
- self.conv_body_down.append(ResBlock(in_channels, out_channels))
- in_channels = out_channels
-
- self.final_conv = ConvLayer(in_channels, channels['4'], 3, bias=True, activate=True)
-
- # upsample
- in_channels = channels['4']
- self.conv_body_up = nn.ModuleList()
- for i in range(3, self.log_size + 1):
- out_channels = channels[f'{2**i}']
- self.conv_body_up.append(ResUpBlock(in_channels, out_channels))
- in_channels = out_channels
-
- # to RGB
- self.toRGB = nn.ModuleList()
- for i in range(3, self.log_size + 1):
- self.toRGB.append(EqualConv2d(channels[f'{2**i}'], 3, 1, stride=1, padding=0, bias=True, bias_init_val=0))
-
- if different_w:
- linear_out_channel = (int(math.log(out_size, 2)) * 2 - 2) * num_style_feat
- else:
- linear_out_channel = num_style_feat
-
- self.final_linear = EqualLinear(
- channels['4'] * 4 * 4, linear_out_channel, bias=True, bias_init_val=0, lr_mul=1, activation=None)
-
- # the decoder: stylegan2 generator with SFT modulations
- self.stylegan_decoder = StyleGAN2GeneratorBilinearSFT(
- out_size=out_size,
- num_style_feat=num_style_feat,
- num_mlp=num_mlp,
- channel_multiplier=channel_multiplier,
- lr_mlp=lr_mlp,
- narrow=narrow,
- sft_half=sft_half)
-
- # load pre-trained stylegan2 model if necessary
- if decoder_load_path:
- self.stylegan_decoder.load_state_dict(
- torch.load(decoder_load_path, map_location=lambda storage, loc: storage)['params_ema'])
- # fix decoder without updating params
- if fix_decoder:
- for _, param in self.stylegan_decoder.named_parameters():
- param.requires_grad = False
-
- # for SFT modulations (scale and shift)
- self.condition_scale = nn.ModuleList()
- self.condition_shift = nn.ModuleList()
- for i in range(3, self.log_size + 1):
- out_channels = channels[f'{2**i}']
- if sft_half:
- sft_out_channels = out_channels
- else:
- sft_out_channels = out_channels * 2
- self.condition_scale.append(
- nn.Sequential(
- EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0),
- ScaledLeakyReLU(0.2),
- EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=1)))
- self.condition_shift.append(
- nn.Sequential(
- EqualConv2d(out_channels, out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0),
- ScaledLeakyReLU(0.2),
- EqualConv2d(out_channels, sft_out_channels, 3, stride=1, padding=1, bias=True, bias_init_val=0)))
-
- def forward(self, x, return_latents=False, return_rgb=True, randomize_noise=True):
- """Forward function for GFPGANBilinear.
-
- Args:
- x (Tensor): Input images.
- return_latents (bool): Whether to return style latents. Default: False.
- return_rgb (bool): Whether return intermediate rgb images. Default: True.
- randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True.
- """
- conditions = []
- unet_skips = []
- out_rgbs = []
-
- # encoder
- feat = self.conv_body_first(x)
- for i in range(self.log_size - 2):
- feat = self.conv_body_down[i](feat)
- unet_skips.insert(0, feat)
-
- feat = self.final_conv(feat)
-
- # style code
- style_code = self.final_linear(feat.view(feat.size(0), -1))
- if self.different_w:
- style_code = style_code.view(style_code.size(0), -1, self.num_style_feat)
-
- # decode
- for i in range(self.log_size - 2):
- # add unet skip
- feat = feat + unet_skips[i]
- # ResUpLayer
- feat = self.conv_body_up[i](feat)
- # generate scale and shift for SFT layers
- scale = self.condition_scale[i](feat)
- conditions.append(scale.clone())
- shift = self.condition_shift[i](feat)
- conditions.append(shift.clone())
- # generate rgb images
- if return_rgb:
- out_rgbs.append(self.toRGB[i](feat))
-
- # decoder
- image, _ = self.stylegan_decoder([style_code],
- conditions,
- return_latents=return_latents,
- input_is_latent=self.input_is_latent,
- randomize_noise=randomize_noise)
-
- return image, out_rgbs
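As a smoke-test sketch for the architecture removed above, assuming the `gfpgan` package and its `basicsr` dependency are installed so the module's imports resolve:

```python
# Minimal smoke-test sketch (assumes the `gfpgan` package and `basicsr` are installed).
import torch
from gfpgan.archs.gfpgan_bilinear_arch import GFPGANBilinear

net = GFPGANBilinear(out_size=512, num_style_feat=512, channel_multiplier=1, fix_decoder=False)
net.eval()

x = torch.randn(1, 3, 512, 512)  # a dummy aligned face crop matching out_size
with torch.no_grad():
    restored, intermediate_rgbs = net(x, return_rgb=True)
print(restored.shape, len(intermediate_rgbs))  # expect (1, 3, 512, 512) and 7 pyramid outputs
```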
diff --git a/spaces/rickystanley76/streamlit-hans-rosling/README.md b/spaces/rickystanley76/streamlit-hans-rosling/README.md
deleted file mode 100644
index 6afbf41335fb2e206d745a9602e46ed444f70001..0000000000000000000000000000000000000000
--- a/spaces/rickystanley76/streamlit-hans-rosling/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: Streamlit Hans Rosling
-emoji: 📉
-colorFrom: red
-colorTo: green
-sdk: streamlit
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
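The README above only carries the Space metadata; the `app.py` it points to is not part of this diff. A hedged sketch of a minimal Gapminder-style app of the kind the Space name suggests (dataset and column names come from Plotly's bundled sample, not from the deleted app):

```python
# Hedged sketch of a minimal Hans Rosling / Gapminder-style Streamlit app.
# The actual app.py is not in this diff; the data comes from Plotly's sample set.
import plotly.express as px
import streamlit as st

st.title("Gapminder, Hans Rosling style")

df = px.data.gapminder()
year = st.slider("Year", int(df.year.min()), int(df.year.max()), step=5)

fig = px.scatter(
    df[df.year == year],
    x="gdpPercap", y="lifeExp", size="pop", color="continent",
    hover_name="country", log_x=True, size_max=60,
)
st.plotly_chart(fig, use_container_width=True)
```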
diff --git a/spaces/rorallitri/biomedical-language-models/logs/CyberLinkPowerDVDUltra190251263CrackSerialKeykeygen !!BETTER!!.md b/spaces/rorallitri/biomedical-language-models/logs/CyberLinkPowerDVDUltra190251263CrackSerialKeykeygen !!BETTER!!.md
deleted file mode 100644
index 480ee50b883fd2c121f8de0e539c7b016ed0a60b..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/CyberLinkPowerDVDUltra190251263CrackSerialKeykeygen !!BETTER!!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- d5da3c52bf
-
-
-
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Download API 685 Standard for Sealless Pumps in PDF Format.md b/spaces/rorallitri/biomedical-language-models/logs/Download API 685 Standard for Sealless Pumps in PDF Format.md
deleted file mode 100644
index e0a94c844a8fb5f92e30dc8bf678f5f14bddb1cf..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Download API 685 Standard for Sealless Pumps in PDF Format.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-
Based on the worldwide proven hydraulic design of the SCE model, the SCE-M is a sealless centrifugal pump, 100% leakage free, avoiding high maintenance costs and the use of mechanical seals. It is the best option for heavy chemical handling applications, complementing API 610 pumps for the majority of applications in the petrochemical industry and refineries.
The free trial period begins on the day you sign up. At the end of the free trial period, your payment method will be charged unless you cancel. Cancel before your free trial ends and you won't be charged. On the 8th day, payment will be collected unless you cancel prior. When the free trial expires, you can still use Acrobat to view PDFs, comment and fill forms. To access all the features again, you will need to purchase Acrobat.
-
Please use the links below to explore all brands available through Teikoku USA. Each brand link will provide a Brand Family Performance Curve through which individual-sized products are further linked to specific water-based performance curves. These curves include standard product drawings available for immediate download. With this web tool, preliminary selections can be made in preparation for further discussion with Factory Application Engineers concerning actual process conditions and liquid properties for final selections and required options.
-
Teikoku leak free pumps are available in every Annex D circulation system detailed in API 685, from standard internal circulation and isolated motors to pressurized containment and high temperature without utility cooling.
- aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Enjoy the Best Audio Experience with FULL SRS HD Audio Lab Gold 1039.md b/spaces/rorallitri/biomedical-language-models/logs/Enjoy the Best Audio Experience with FULL SRS HD Audio Lab Gold 1039.md
deleted file mode 100644
index 52f49b55751f607cb6af348d4dff000bc3a769fa..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Enjoy the Best Audio Experience with FULL SRS HD Audio Lab Gold 1039.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
vSphere Data Protection 5.5.7 Download: =353&rPId=6654&downloadGroup=VDP55_7
Release Notes: _557_releasenotes.html
-
I am also facing a similar issue on ESX 5.1. Whenever I try to add a host to the dvSwitch, it tries to enable the VXLAN agent and fails. I tried a lot of things: downloading and installing VXLAN.zip manually, rebooting ESX, re-installing my ESX, etc., but no luck.
-
Don't want to necro this, but I had to basically mount the VMDK to a different system to grab the vxlan.zip file off of it. Could not download it using IE or Firefox. My suggestion is to open a ticket with VMware support regarding the inability to download from there. Additionally, there are repo XMLs in there that you can't reference from PowerCLI either.
-
For our setup, I used CentOS-5.5-x86_64-netinstall.iso to do a network installation using _64/images and all the defaults for a "Server" type deployment. Since I cannot re-distribute the installers for VMware vCloud Director and Oracle Express, you will need to download the installers and transfer them to your CentOS system. The last step before you begin is to transfer the vcd_setup.sh script and the vcd.resp response file to your CentOS system.
-
[[email protected] ~]# ls anaconda-ks.cfg install.log oracle-xe-10.2.0.1-1.0.i386.rpm vcd.resp vmware-cloud-director-1.0.0-285979.bin Desktop install.log.syslog oracle-xe-10.2.0.1-1.0.i386.rpm.old vcd_setup.sh [[email protected] ~]# ls anaconda-ks.cfg install.log oracle-xe-10.2.0.1-1.0.i386.rpm vcd.resp vmware-cloud-director-1.0.0-285979.bin Desktop install.log.syslog oracle-xe-10.2.0.1-1.0.i386.rpm.old vcd_setup.sh [[email protected] ~]# ./vcd_setup.sh vcd.resp : command not found : command not found : command not found : command not found : command not found
-
[[email protected] ~]# sh -x vcd_setup.sh vcd.resp + cyan='\E[36;40m' + green='\E[32;40m' + red='\E[31;40m' + yellow='\E[33;40m' + '[' 1 -ne 1 ']' + source vcd.resp ++ ORACLE_EXPRESS_RPM=$'oracle-xe-10.2.0.1-1.0.i386.rpm\r' ++ VMWARE_VCD_BIN=$'vmware-cloud-director-1.0.0-285979.bin\r' ++ $'\r' : command not found ++ IP_ADDRESS_2=$'192.168.15.62\r' ++ IP_ADDRESS_2_NETMASK=$'255.255.255.0\r' ++ $'\r' : command not found ++ KEYSTORE_PASSWORD=$'vcloud\r' ++ KEYSTORE_ORG_UNIT_NAME=$'Engineering\r' ++ KEYSTORE_ORG=$'virtual\r' ++ KEYSTORE_CITY=$'Hyderabad\r' ++ KEYSTORE_STATE=$'AP\r' ++ KEYSTORE_COUNTRY=$'India\r' ++ $'\r' : command not found ++ ORACLE_XE_GUI_PORT=$'8080\r' ++ ORACLE_XE_LISTENER_PORT=$'1521\r' ++ ORACLE_SYS_PASSWORD=$'vcloud\r' ++ ORACLE_VCLOUD_USERNAME=$'vcloud\r' ++ ORACLE_VCLOUD_PASSWORD=$'vcloud\r' ++ $'\r' : command not found ++ TABLESPACE_CLOUD_DATA_SIZE=$'1000M\r' ++ TABLESPACE_CLOUD_INDEX_SIZE=$'10M\r' ++ $'\r' : command not found + verify + '[' 0 -ne 0 ']' + '[' '!' -f $'oracle-xe-10.2.0.1-1.0.i386.rpm\r' ']' !' '\E[31;40m'e to find oracle-xe-10.2.0.1-1.0.i386.rpm + local 'default_msg=No message passed.' !'message='Unable to find oracle-xe-10.2.0.1-1.0.i386.rpm + color='\E[31;40m' + echo -e '\E[31;40m'
-
It looks like there are some issues with SourceForge's SVN web browser. I've heard their site was recently attacked, so I don't know if it's related or not. The actual SVN repo is in fact available if you use the CLI, but you won't be able to download from the web until the issue is resolved.
-
-
I had the same issue as surya, with the "command not found" and "IP_ADDRESS_2 is not defined!" errors. All I did was to redownload the vcd.resp file using wget from the vCD server itself instead of downloading it using a web browser. It was probably just a white space error or similar, and now it's working flawlessly. Thanks a lot for these scripts, they got me up and running in no time!
-
[email protected] script installtion]# ./vcd_setup.sh vcd.resp bash: ./vcd_setup.sh: /bin/bash^M: bad interpreter: No such file or directory [[email protected] script installtion]# sh -x vcd_setup.sh vcd.resp + $'\r' : command not found2: + $'\r' : command not found6: ' cyan='\E[36;40m ' green='\E[32;40m ' red='\E[31;40m ' yellow='\E[33;40m + $'\r' : command not found11: 'cd_setup.sh: line 12: syntax error near unexpected token `{ 'cd_setup.sh: line 12: `cecho() { [[email protected] script installtion]#
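The `$'\r' : command not found` and `/bin/bash^M: bad interpreter` messages above are the classic symptom of Windows-style CRLF line endings in vcd_setup.sh and vcd.resp, typically picked up when the files are downloaded through a browser. As a minimal sketch (the file names are taken from the transcript above, everything else is assumed), the carriage returns could be stripped with a few lines of Python before re-running the script:

```python
# strip_crlf.py -- remove Windows carriage returns from the installer script and response file
for name in ("vcd_setup.sh", "vcd.resp"):
    with open(name, "rb") as f:
        data = f.read()
    with open(name, "wb") as f:
        f.write(data.replace(b"\r\n", b"\n"))
```

dos2unix or `sed -i 's/\r$//' vcd_setup.sh vcd.resp` would do the same job, which also matches the earlier report that re-downloading vcd.resp with wget (which does not translate line endings) made the errors go away.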
-
Step 6. Download the VMware Public Keys: Download and install the VMware Public Key. From KB 2005832: The installation file for vCloud Director is digitally signed to secure your environment. To install the product, you must verify the signature by downloading and installing the VMware public key in your environment.
-
Step 7. Download and install the libXdmcp package: One of the required packages for installing vCloud Director is not installed by default on RHEL6. I found it easier to download the package off the internet from rpmfind.net. Check this repository before continuing to make sure the packages haven't been updated from my commands below.
-
Step 9. Start and stop the installation: go to the directory of the file and change the permissions of the .bin to make it executable, then start the executable. When it asks if you wish to run this script, press N for NO. Very important.
-
Change the permissions of the transfer folder. By default, root is the owner and we need vcloud to be the owner instead. Mount the new export; if it succeeds, no error message will be shown. Check the permissions of the transfer folder to make sure vcloud is the owner.
-
Step 12. response.properties. The response.properties file is located in /opt/vmware/vcloud-director/etc, is used by all vCloud Director cells, and needs to be protected in a secure location. Since the NFS transfer directory is only accessible by the vCloud cells, it's secure enough for me. We first need to verify that it has the correct ownership, vcloud:vcloud; if it doesn't, chown it like we did in step 9. Then copy the file to the NFS share.
-
I think this will make you happy. VMware just released the vCloud Director virtual appliance. That means no more installing Red Hat, Oracle, and vCloud Director separately; just download the appliance and deploy it. On top of that, there is a great vCloud Director Evaluator's Guide which will help you evaluate the product.
-
Problem: In vCloud Director a new Provider VDC was created and linked to the new vCenter Server cluster. Within the existing organizations we created an additional virtual datacenter with the MAC Provider VDC selected. This created a new resource pool in the cluster.
-
vCloud director 5.5 release notes: _notes_vcloud_director_55.html#sysreqs Pre upgrade checklist for potential edge gateway problems: =en_US&cmd=displayKC&externalId=2058977 =en_US&cmd=displayKC&externalId=2060065
If your Vulkan driver is blacklisted, you might have to add mks.vk.allowUnsupportedDevices = "TRUE" to ~/.vmware/preferences or switch to a supported driver - check vmware.log in the VM's directory if unsure.
-
Despite setting the VMWARE_USE_SHIPPED_LIBS variable, VMware may still fail to find certain libraries. An example is the libfontconfig.so.1 library. Check the VMware logs in the tmp directory to see which libraries are still not found, then copy the corresponding libraries that exist on the system to the appropriate path:
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/rorallitri/biomedical-language-models/logs/Heathen City 3 Download Tested wallhack americano o How to Get the Best Cheats for the Game.md b/spaces/rorallitri/biomedical-language-models/logs/Heathen City 3 Download Tested wallhack americano o How to Get the Best Cheats for the Game.md
deleted file mode 100644
index 1e5d382746fda64b24aedbd6d1eabb8a33611283..0000000000000000000000000000000000000000
--- a/spaces/rorallitri/biomedical-language-models/logs/Heathen City 3 Download Tested wallhack americano o How to Get the Best Cheats for the Game.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
When the above was published I was in the far South. It was altogether the effort of my friends who had heard me tell the story of Africa and her neglected millions, and the publicity given through the press of the reception referred to at Kinsley's Banquet Hall. I never made application to the Government for this position, neither did I write a line to any of the papers advocating the appointment. I simply felt that if it was God's will, I should be appointed. It came about without my interfering in the matter. Indeed, I protested against rushing me into such a prominent and important position, and in any case begged the friends to be careful in their enthusiasm to avoid a fanciful and misleading representation of my abilities and qualifications. I felt that I could not assume such an important Government position. There were, I heard, fifty applications for the post. The man who was eventually chosen, who was a scholar, afterwards came to me for some information about the place to which he was appointed, and I was glad to give him all the information I could, for truly he was far superior to myself and better fitted for the post.
-
Heathen City 3 Download | Tested wallhack americano o
-
-Related promotions: David Foster: Hit Man & Friends 2008 CD/DVD ... Jul 18, 2020 · Music Concert = Full performance released on DVD or Bluray ... Free delivery and returns on eBay Plus items for Plus members. s (I deal scenario) 04. ... BIG SHOW 2011 BIGBANG LIVE CONCERT CD [BIG SHOW] ALIVE ... 1fdad05405
-
-
-
diff --git a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/wavenet.py b/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/wavenet.py
deleted file mode 100644
index a14870e5b5af2874fca6c55f37b806a616bc30e0..0000000000000000000000000000000000000000
--- a/spaces/segments-tobias/conex/espnet/nets/pytorch_backend/wavenet.py
+++ /dev/null
@@ -1,448 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2019 Tomoki Hayashi (Nagoya University)
-# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
-
-"""This code is based on https://github.com/kan-bayashi/PytorchWaveNetVocoder."""
-
-import logging
-import sys
-import time
-
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-from torch import nn
-
-
-def encode_mu_law(x, mu=256):
- """Perform mu-law encoding.
-
- Args:
- x (ndarray): Audio signal with the range from -1 to 1.
- mu (int): Quantized level.
-
- Returns:
- ndarray: Quantized audio signal with the range from 0 to mu - 1.
-
- """
- mu = mu - 1
- fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu)
- return np.floor((fx + 1) / 2 * mu + 0.5).astype(np.int64)
-
-
-def decode_mu_law(y, mu=256):
- """Perform mu-law decoding.
-
- Args:
-        y (ndarray): Quantized audio signal with the range from 0 to mu - 1.
- mu (int): Quantized level.
-
- Returns:
- ndarray: Audio signal with the range from -1 to 1.
-
- """
- mu = mu - 1
- fx = (y - 0.5) / mu * 2 - 1
- x = np.sign(fx) / mu * ((1 + mu) ** np.abs(fx) - 1)
- return x
-
-
-def initialize(m):
-    """Initialize conv layers with Xavier initialization.
-
- Args:
- m (torch.nn.Module): Torch module.
-
- """
- if isinstance(m, nn.Conv1d):
- nn.init.xavier_uniform_(m.weight)
- nn.init.constant_(m.bias, 0.0)
-
- if isinstance(m, nn.ConvTranspose2d):
- nn.init.constant_(m.weight, 1.0)
- nn.init.constant_(m.bias, 0.0)
-
-
-class OneHot(nn.Module):
- """Convert to one-hot vector.
-
- Args:
- depth (int): Dimension of one-hot vector.
-
- """
-
- def __init__(self, depth):
- super(OneHot, self).__init__()
- self.depth = depth
-
- def forward(self, x):
- """Calculate forward propagation.
-
- Args:
- x (LongTensor): long tensor variable with the shape (B, T)
-
- Returns:
- Tensor: float tensor variable with the shape (B, depth, T)
-
- """
- x = x % self.depth
- x = torch.unsqueeze(x, 2)
- x_onehot = x.new_zeros(x.size(0), x.size(1), self.depth).float()
-
- return x_onehot.scatter_(2, x, 1)
-
-
-class CausalConv1d(nn.Module):
- """1D dilated causal convolution."""
-
- def __init__(self, in_channels, out_channels, kernel_size, dilation=1, bias=True):
- super(CausalConv1d, self).__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.kernel_size = kernel_size
- self.dilation = dilation
- self.padding = padding = (kernel_size - 1) * dilation
- self.conv = nn.Conv1d(
- in_channels,
- out_channels,
- kernel_size,
- padding=padding,
- dilation=dilation,
- bias=bias,
- )
-
- def forward(self, x):
- """Calculate forward propagation.
-
- Args:
- x (Tensor): Input tensor with the shape (B, in_channels, T).
-
- Returns:
- Tensor: Tensor with the shape (B, out_channels, T)
-
- """
- x = self.conv(x)
- if self.padding != 0:
- x = x[:, :, : -self.padding]
- return x
-
-
-class UpSampling(nn.Module):
- """Upsampling layer with deconvolution.
-
- Args:
- upsampling_factor (int): Upsampling factor.
-
- """
-
- def __init__(self, upsampling_factor, bias=True):
- super(UpSampling, self).__init__()
- self.upsampling_factor = upsampling_factor
- self.bias = bias
- self.conv = nn.ConvTranspose2d(
- 1,
- 1,
- kernel_size=(1, self.upsampling_factor),
- stride=(1, self.upsampling_factor),
- bias=self.bias,
- )
-
- def forward(self, x):
- """Calculate forward propagation.
-
- Args:
- x (Tensor): Input tensor with the shape (B, C, T)
-
- Returns:
- Tensor: Tensor with the shape (B, C, T') where T' = T * upsampling_factor.
-
- """
- x = x.unsqueeze(1) # B x 1 x C x T
- x = self.conv(x) # B x 1 x C x T'
- return x.squeeze(1)
-
-
-class WaveNet(nn.Module):
- """Conditional wavenet.
-
- Args:
-        n_quantize (int): Number of quantization levels.
-        n_aux (int): Number of aux feature dimensions.
-        n_resch (int): Number of filter channels for residual block.
-        n_skipch (int): Number of filter channels for skip connection.
-        dilation_depth (int): Depth of dilation
-            (e.g. if set to 10, max dilation = 2^(10-1)).
-        dilation_repeat (int): Number of dilation repeats.
- kernel_size (int): Filter size of dilated causal convolution.
- upsampling_factor (int): Upsampling factor.
-
- """
-
- def __init__(
- self,
- n_quantize=256,
- n_aux=28,
- n_resch=512,
- n_skipch=256,
- dilation_depth=10,
- dilation_repeat=3,
- kernel_size=2,
- upsampling_factor=0,
- ):
- super(WaveNet, self).__init__()
- self.n_aux = n_aux
- self.n_quantize = n_quantize
- self.n_resch = n_resch
- self.n_skipch = n_skipch
- self.kernel_size = kernel_size
- self.dilation_depth = dilation_depth
- self.dilation_repeat = dilation_repeat
- self.upsampling_factor = upsampling_factor
-
- self.dilations = [
- 2 ** i for i in range(self.dilation_depth)
- ] * self.dilation_repeat
- self.receptive_field = (self.kernel_size - 1) * sum(self.dilations) + 1
-
- # for preprocessing
- self.onehot = OneHot(self.n_quantize)
- self.causal = CausalConv1d(self.n_quantize, self.n_resch, self.kernel_size)
- if self.upsampling_factor > 0:
- self.upsampling = UpSampling(self.upsampling_factor)
-
- # for residual blocks
- self.dil_sigmoid = nn.ModuleList()
- self.dil_tanh = nn.ModuleList()
- self.aux_1x1_sigmoid = nn.ModuleList()
- self.aux_1x1_tanh = nn.ModuleList()
- self.skip_1x1 = nn.ModuleList()
- self.res_1x1 = nn.ModuleList()
- for d in self.dilations:
- self.dil_sigmoid += [
- CausalConv1d(self.n_resch, self.n_resch, self.kernel_size, d)
- ]
- self.dil_tanh += [
- CausalConv1d(self.n_resch, self.n_resch, self.kernel_size, d)
- ]
- self.aux_1x1_sigmoid += [nn.Conv1d(self.n_aux, self.n_resch, 1)]
- self.aux_1x1_tanh += [nn.Conv1d(self.n_aux, self.n_resch, 1)]
- self.skip_1x1 += [nn.Conv1d(self.n_resch, self.n_skipch, 1)]
- self.res_1x1 += [nn.Conv1d(self.n_resch, self.n_resch, 1)]
-
- # for postprocessing
- self.conv_post_1 = nn.Conv1d(self.n_skipch, self.n_skipch, 1)
- self.conv_post_2 = nn.Conv1d(self.n_skipch, self.n_quantize, 1)
-
- def forward(self, x, h):
- """Calculate forward propagation.
-
- Args:
- x (LongTensor): Quantized input waveform tensor with the shape (B, T).
- h (Tensor): Auxiliary feature tensor with the shape (B, n_aux, T).
-
- Returns:
- Tensor: Logits with the shape (B, T, n_quantize).
-
- """
- # preprocess
- output = self._preprocess(x)
- if self.upsampling_factor > 0:
- h = self.upsampling(h)
-
- # residual block
- skip_connections = []
- for i in range(len(self.dilations)):
- output, skip = self._residual_forward(
- output,
- h,
- self.dil_sigmoid[i],
- self.dil_tanh[i],
- self.aux_1x1_sigmoid[i],
- self.aux_1x1_tanh[i],
- self.skip_1x1[i],
- self.res_1x1[i],
- )
- skip_connections.append(skip)
-
- # skip-connection part
- output = sum(skip_connections)
- output = self._postprocess(output)
-
- return output
-
- def generate(self, x, h, n_samples, interval=None, mode="sampling"):
-        """Generate a waveform with the fast generation algorithm.
-
-        This generation is based on the `Fast WaveNet Generation Algorithm`_.
-
- Args:
- x (LongTensor): Initial waveform tensor with the shape (T,).
- h (Tensor): Auxiliary feature tensor with the shape (n_samples + T, n_aux).
- n_samples (int): Number of samples to be generated.
- interval (int, optional): Log interval.
- mode (str, optional): "sampling" or "argmax".
-
-        Returns:
-            ndarray: Generated quantized waveform with the shape (n_samples,).
-
- .. _`Fast WaveNet Generation Algorithm`: https://arxiv.org/abs/1611.09482
-
- """
- # reshape inputs
- assert len(x.shape) == 1
- assert len(h.shape) == 2 and h.shape[1] == self.n_aux
- x = x.unsqueeze(0)
- h = h.transpose(0, 1).unsqueeze(0)
-
- # perform upsampling
- if self.upsampling_factor > 0:
- h = self.upsampling(h)
-
- # padding for shortage
- if n_samples > h.shape[2]:
- h = F.pad(h, (0, n_samples - h.shape[2]), "replicate")
-
-        # pad if the input length is less than the receptive field
- n_pad = self.receptive_field - x.size(1)
- if n_pad > 0:
- x = F.pad(x, (n_pad, 0), "constant", self.n_quantize // 2)
- h = F.pad(h, (n_pad, 0), "replicate")
-
- # prepare buffer
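-        # (fast generation keeps, for each layer, a queue of its most recent outputs
-        #  so that only a single new sample has to be computed per step)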
- output = self._preprocess(x)
- h_ = h[:, :, : x.size(1)]
- output_buffer = []
- buffer_size = []
- for i, d in enumerate(self.dilations):
- output, _ = self._residual_forward(
- output,
- h_,
- self.dil_sigmoid[i],
- self.dil_tanh[i],
- self.aux_1x1_sigmoid[i],
- self.aux_1x1_tanh[i],
- self.skip_1x1[i],
- self.res_1x1[i],
- )
- if d == 2 ** (self.dilation_depth - 1):
- buffer_size.append(self.kernel_size - 1)
- else:
- buffer_size.append(d * 2 * (self.kernel_size - 1))
- output_buffer.append(output[:, :, -buffer_size[i] - 1 : -1])
-
- # generate
- samples = x[0]
- start_time = time.time()
- for i in range(n_samples):
- output = samples[-self.kernel_size * 2 + 1 :].unsqueeze(0)
- output = self._preprocess(output)
- h_ = h[:, :, samples.size(0) - 1].contiguous().view(1, self.n_aux, 1)
- output_buffer_next = []
- skip_connections = []
- for j, d in enumerate(self.dilations):
- output, skip = self._generate_residual_forward(
- output,
- h_,
- self.dil_sigmoid[j],
- self.dil_tanh[j],
- self.aux_1x1_sigmoid[j],
- self.aux_1x1_tanh[j],
- self.skip_1x1[j],
- self.res_1x1[j],
- )
- output = torch.cat([output_buffer[j], output], dim=2)
- output_buffer_next.append(output[:, :, -buffer_size[j] :])
- skip_connections.append(skip)
-
- # update buffer
- output_buffer = output_buffer_next
-
- # get predicted sample
- output = sum(skip_connections)
- output = self._postprocess(output)[0]
- if mode == "sampling":
- posterior = F.softmax(output[-1], dim=0)
- dist = torch.distributions.Categorical(posterior)
- sample = dist.sample().unsqueeze(0)
- elif mode == "argmax":
- sample = output.argmax(-1)
- else:
- logging.error("mode should be sampling or argmax")
- sys.exit(1)
- samples = torch.cat([samples, sample], dim=0)
-
- # show progress
- if interval is not None and (i + 1) % interval == 0:
- elapsed_time_per_sample = (time.time() - start_time) / interval
- logging.info(
- "%d/%d estimated time = %.3f sec (%.3f sec / sample)"
- % (
- i + 1,
- n_samples,
- (n_samples - i - 1) * elapsed_time_per_sample,
- elapsed_time_per_sample,
- )
- )
- start_time = time.time()
-
- return samples[-n_samples:].cpu().numpy()
-
- def _preprocess(self, x):
- x = self.onehot(x).transpose(1, 2)
- output = self.causal(x)
- return output
-
- def _postprocess(self, x):
- output = F.relu(x)
- output = self.conv_post_1(output)
- output = F.relu(output) # B x C x T
- output = self.conv_post_2(output).transpose(1, 2) # B x T x C
- return output
-
- def _residual_forward(
- self,
- x,
- h,
- dil_sigmoid,
- dil_tanh,
- aux_1x1_sigmoid,
- aux_1x1_tanh,
- skip_1x1,
- res_1x1,
- ):
- output_sigmoid = dil_sigmoid(x)
- output_tanh = dil_tanh(x)
- aux_output_sigmoid = aux_1x1_sigmoid(h)
- aux_output_tanh = aux_1x1_tanh(h)
- output = torch.sigmoid(output_sigmoid + aux_output_sigmoid) * torch.tanh(
- output_tanh + aux_output_tanh
- )
- skip = skip_1x1(output)
- output = res_1x1(output)
- output = output + x
- return output, skip
-
- def _generate_residual_forward(
- self,
- x,
- h,
- dil_sigmoid,
- dil_tanh,
- aux_1x1_sigmoid,
- aux_1x1_tanh,
- skip_1x1,
- res_1x1,
- ):
- output_sigmoid = dil_sigmoid(x)[:, :, -1:]
- output_tanh = dil_tanh(x)[:, :, -1:]
- aux_output_sigmoid = aux_1x1_sigmoid(h)
- aux_output_tanh = aux_1x1_tanh(h)
- output = torch.sigmoid(output_sigmoid + aux_output_sigmoid) * torch.tanh(
- output_tanh + aux_output_tanh
- )
- skip = skip_1x1(output)
- output = res_1x1(output)
- output = output + x[:, :, -1:] # B x C x 1
- return output, skip
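As a quick illustration of the mu-law companding implemented by encode_mu_law and decode_mu_law above, here is a self-contained sketch (NumPy only, with the two function bodies copied from the deleted module) that round-trips a test signal and prints the quantization error; the sine test signal and the expectations in the comments are just illustrative assumptions:

```python
import numpy as np

def encode_mu_law(x, mu=256):
    # same companding as above: map floats in [-1, 1] to integer levels in [0, mu - 1]
    mu = mu - 1
    fx = np.sign(x) * np.log(1 + mu * np.abs(x)) / np.log(1 + mu)
    return np.floor((fx + 1) / 2 * mu + 0.5).astype(np.int64)

def decode_mu_law(y, mu=256):
    # inverse mapping back to the [-1, 1] range
    mu = mu - 1
    fx = (y - 0.5) / mu * 2 - 1
    return np.sign(fx) / mu * ((1 + mu) ** np.abs(fx) - 1)

x = 0.8 * np.sin(np.linspace(0, 2 * np.pi, 16000))   # dummy one-period sine "waveform"
y = encode_mu_law(x)                                  # int64 levels, here within [0, 255]
x_hat = decode_mu_law(y)
print(y.min(), y.max(), np.abs(x - x_hat).max())      # max round-trip error stays below ~0.02
```

The WaveNet class above consumes these quantized levels: its forward pass takes the encoded LongTensor plus the auxiliary features, and decode_mu_law maps generated sample indices back to audio.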
diff --git a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/util/logger.py b/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/util/logger.py
deleted file mode 100644
index 18145f54c927abd59b95f3fa6e6da8002bc2ce97..0000000000000000000000000000000000000000
--- a/spaces/segments/panoptic-segment-anything-api/GroundingDINO/groundingdino/util/logger.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import functools
-import logging
-import os
-import sys
-
-from termcolor import colored
-
-
-class _ColorfulFormatter(logging.Formatter):
- def __init__(self, *args, **kwargs):
- self._root_name = kwargs.pop("root_name") + "."
- self._abbrev_name = kwargs.pop("abbrev_name", "")
- if len(self._abbrev_name):
- self._abbrev_name = self._abbrev_name + "."
- super(_ColorfulFormatter, self).__init__(*args, **kwargs)
-
- def formatMessage(self, record):
- record.name = record.name.replace(self._root_name, self._abbrev_name)
- log = super(_ColorfulFormatter, self).formatMessage(record)
- if record.levelno == logging.WARNING:
- prefix = colored("WARNING", "red", attrs=["blink"])
- elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
- prefix = colored("ERROR", "red", attrs=["blink", "underline"])
- else:
- return log
- return prefix + " " + log
-
-
-# so that calling setup_logger multiple times won't add many handlers
-@functools.lru_cache()
-def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None):
- """
-    Initialize the detectron2 logger and set its verbosity level to "DEBUG".
-
- Args:
- output (str): a file name or a directory to save log. If None, will not save log file.
- If ends with ".txt" or ".log", assumed to be a file name.
- Otherwise, logs will be saved to `output/log.txt`.
- name (str): the root module name of this logger
-
- Returns:
- logging.Logger: a logger
- """
- logger = logging.getLogger(name)
- logger.setLevel(logging.DEBUG)
- logger.propagate = False
-
- if abbrev_name is None:
- abbrev_name = name
-
- plain_formatter = logging.Formatter(
- "[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S"
- )
- # stdout logging: master only
- if distributed_rank == 0:
- ch = logging.StreamHandler(stream=sys.stdout)
- ch.setLevel(logging.DEBUG)
- if color:
- formatter = _ColorfulFormatter(
- colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s",
- datefmt="%m/%d %H:%M:%S",
- root_name=name,
- abbrev_name=str(abbrev_name),
- )
- else:
- formatter = plain_formatter
- ch.setFormatter(formatter)
- logger.addHandler(ch)
-
- # file logging: all workers
- if output is not None:
- if output.endswith(".txt") or output.endswith(".log"):
- filename = output
- else:
- filename = os.path.join(output, "log.txt")
- if distributed_rank > 0:
- filename = filename + f".rank{distributed_rank}"
- os.makedirs(os.path.dirname(filename), exist_ok=True)
-
- fh = logging.StreamHandler(_cached_log_stream(filename))
- fh.setLevel(logging.DEBUG)
- fh.setFormatter(plain_formatter)
- logger.addHandler(fh)
-
- return logger
-
-
-# cache the opened file object, so that different calls to `setup_logger`
-# with the same file name can safely write to the same file.
-@functools.lru_cache(maxsize=None)
-def _cached_log_stream(filename):
- return open(filename, "a")
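For reference, a minimal usage sketch of the setup_logger helper above, assuming the module is still importable at groundingdino.util.logger (its path before this deletion) and that the chosen output directory is writable:

```python
from groundingdino.util.logger import setup_logger

# rank 0 gets a colored stdout handler; every rank writes a plain-text log file
logger = setup_logger(output="./logs", distributed_rank=0, name="groundingdino", abbrev_name="gdino")
logger.info("logger initialized")       # goes to stdout and to ./logs/log.txt
logger.warning("watch out")             # the WARNING prefix is colorized on the console
```

Because setup_logger is wrapped in functools.lru_cache, repeated calls with the same arguments return the already configured logger instead of stacking duplicate handlers.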
diff --git a/spaces/seo-sean/andite-pastel-mix/app.py b/spaces/seo-sean/andite-pastel-mix/app.py
deleted file mode 100644
index f90587fb1b4538cf5a92df4655fed8e4e2cf9579..0000000000000000000000000000000000000000
--- a/spaces/seo-sean/andite-pastel-mix/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/andite/pastel-mix").launch()
\ No newline at end of file
diff --git a/spaces/shikunl/prismer/prismer/experts/ocr_detection/charnet/modeling/__init__.py b/spaces/shikunl/prismer/prismer/experts/ocr_detection/charnet/modeling/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/shuhulhandoo/face-swap/face_detection.py b/spaces/shuhulhandoo/face-swap/face_detection.py
deleted file mode 100644
index 863835b3953208290aefa2e976258c98ba54f7ed..0000000000000000000000000000000000000000
--- a/spaces/shuhulhandoo/face-swap/face_detection.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import cv2
-import dlib
-import numpy as np
-
-## Face detection
-def face_detection(img,upsample_times=1):
-    # Ask the detector to find the bounding boxes of each face. The second
-    # argument controls how many times the image is upsampled; upsampling makes
-    # everything bigger and allows us to detect more (smaller) faces.
- detector = dlib.get_frontal_face_detector()
- faces = detector(img, upsample_times)
-
- return faces
-
-PREDICTOR_PATH = 'models/shape_predictor_68_face_landmarks.dat'
-predictor = dlib.shape_predictor(PREDICTOR_PATH)
-## Face and points detection
-def face_points_detection(img, bbox:dlib.rectangle):
- # Get the landmarks/parts for the face in box d.
- shape = predictor(img, bbox)
-
- # loop over the 68 facial landmarks and convert them
- # to a 2-tuple of (x, y)-coordinates
- coords = np.asarray(list([p.x, p.y] for p in shape.parts()), dtype=int)
-
- # return the array of (x, y)-coordinates
- return coords
-
-def select_face(im, r=10, choose=True):
- faces = face_detection(im)
-
- if len(faces) == 0:
- return None, None, None
-
- if len(faces) == 1 or not choose:
- idx = np.argmax([(face.right() - face.left()) * (face.bottom() - face.top()) for face in faces])
- bbox = faces[idx]
- else:
- bbox = []
-
- def click_on_face(event, x, y, flags, params):
- if event != cv2.EVENT_LBUTTONDOWN:
- return
-
- for face in faces:
- if face.left() < x < face.right() and face.top() < y < face.bottom():
- bbox.append(face)
- break
-
- im_copy = im.copy()
- for face in faces:
- # draw the face bounding box
- cv2.rectangle(im_copy, (face.left(), face.top()), (face.right(), face.bottom()), (0, 0, 255), 1)
- cv2.imshow('Click the Face:', im_copy)
- cv2.setMouseCallback('Click the Face:', click_on_face)
- while len(bbox) == 0:
- cv2.waitKey(1)
- cv2.destroyAllWindows()
- bbox = bbox[0]
-
- points = np.asarray(face_points_detection(im, bbox))
-
- im_w, im_h = im.shape[:2]
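-    # NOTE: im.shape[:2] is (height, width), so im_w actually holds the height and
-    # im_h the width; the min() clamps below still compare against the correct axes.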
- left, top = np.min(points, 0)
- right, bottom = np.max(points, 0)
-
- x, y = max(0, left - r), max(0, top - r)
- w, h = min(right + r, im_h) - x, min(bottom + r, im_w) - y
-
- return points - np.asarray([[x, y]]), (x, y, w, h), im[y:y + h, x:x + w]
-
-
-def select_all_faces(im, r=10):
- faces = face_detection(im)
-
- if len(faces) == 0:
- return None
-
- faceBoxes = {k : {"points" : None,
- "shape" : None,
- "face" : None} for k in range(len(faces))}
- for i, bbox in enumerate(faces):
- points = np.asarray(face_points_detection(im, bbox))
-
- im_w, im_h = im.shape[:2]
- left, top = np.min(points, 0)
- right, bottom = np.max(points, 0)
-
- x, y = max(0, left - r), max(0, top - r)
- w, h = min(right + r, im_h) - x, min(bottom + r, im_w) - y
- faceBoxes[i]["points"] = points - np.asarray([[x, y]])
- faceBoxes[i]["shape"] = (x, y, w, h)
- faceBoxes[i]["face"] = im[y:y + h, x:x + w]
-
- return faceBoxes
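A minimal usage sketch of the helpers above, assuming the module is saved as face_detection.py, that models/shape_predictor_68_face_landmarks.dat exists next to it (the predictor is loaded at import time), and that input.jpg is any image containing at least one face:

```python
import cv2
from face_detection import select_face, select_all_faces

img = cv2.imread("input.jpg")                                  # image loaded with OpenCV, as the rest of the module assumes
points, box, face_crop = select_face(img, r=10, choose=False)  # choose=False picks the largest face, no click window
if points is None:
    print("no face detected")
else:
    x, y, w, h = box
    print(f"largest face at ({x}, {y}), size {w}x{h}, {len(points)} landmarks")
    faces = select_all_faces(img, r=10)                        # dict: index -> {"points", "shape", "face"}
    print(f"{len(faces)} face(s) in total")
```

Note that select_face with choose=True (the default) opens an OpenCV window and waits for a mouse click when more than one face is found, so choose=False is the safer option in headless environments.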
diff --git a/spaces/silencewing/server/youyou/.history/game_20230613230227.html b/spaces/silencewing/server/youyou/.history/game_20230613230227.html
deleted file mode 100644
index 631a9f005ee2fbd4f64e2771a69216345efa5477..0000000000000000000000000000000000000000
--- a/spaces/silencewing/server/youyou/.history/game_20230613230227.html
+++ /dev/null
@@ -1,349 +0,0 @@
-
-
-
-
-
-
-
- 转盘抽奖
-
-
-
-
-
-
-
-
-
-
-
-
抽奖
-
-
-
-
-
\ No newline at end of file
diff --git a/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/helper_scripts/parse_multiple_chains.sh b/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/helper_scripts/parse_multiple_chains.sh
deleted file mode 100644
index c2a8a108b05cc9f81c636b3b48bf58ddb4ce1a02..0000000000000000000000000000000000000000
--- a/spaces/simonduerr/ProteinMPNNESM/ProteinMPNN/vanilla_proteinmpnn/helper_scripts/parse_multiple_chains.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash
-#SBATCH --mem=32g
-#SBATCH -c 2
-#SBATCH --output=parse_multiple_chains.out
-
-source activate mlfold
-python parse_multiple_chains.py --input_path='../PDB_complexes/pdbs/' --output_path='../PDB_complexes/parsed_pdbs.jsonl'
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Parking Multiplayer APK Yeni Srm The Ultimate Open World Driving Game.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Parking Multiplayer APK Yeni Srm The Ultimate Open World Driving Game.md
deleted file mode 100644
index deb775db41328358854551c338c156e11616dc79..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Car Parking Multiplayer APK Yeni Srm The Ultimate Open World Driving Game.md
+++ /dev/null
@@ -1,161 +0,0 @@
-
-
Car Parking Multiplayer APK Yeni Sürüm: The Ultimate Guide
-
If you are looking for a realistic and fun parking simulator game that you can play with your friends, then you should check out Car Parking Multiplayer. This game is more than just parking, it is an open-world multiplayer mode with car tuning, racing, free walking, and more. You can choose from over 100 cars with real interiors, customize them to your liking, and explore different environments with buildings and gas stations. You can also chat with other players, join or create teams, and compete in various challenges.
In this guide, we will tell you everything you need to know about Car Parking Multiplayer APK yeni sürüm, which is the latest version of the game. We will explain what is Car Parking Multiplayer, how to download and install it on your device, how to play it, and how to enjoy it with your friends. Let's get started!
-
What is Car Parking Multiplayer?
-
Car Parking Multiplayer is a game that was developed by olzhass, a company that specializes in creating realistic simulation games. The game was first released in 2017 for Android devices, and later for iOS devices as well. Since then, the game has been updated regularly with new features, cars, maps, and improvements.
-
Car Parking Multiplayer is a game that has three main aspects:
-
A realistic and fun parking simulator game
-
The core of the game is to park your car in different scenarios, such as parallel parking, reverse parking, garage parking, etc. You have to follow the rules of the road, avoid obstacles, and use your skills to park your car perfectly. The game has over 80 real-life parking and driving challenges that will test your abilities. You can also choose from different vehicles, such as tow trucks, pickups, trucks, sports cars, and classic cars.
-
An open-world multiplayer mode with various features
-
The game also has an open-world multiplayer mode where you can free roam in different maps with real gas stations and car services. You can interact with thousands of real players every day, exchange cars with them, compete against them in races, or cooperate with them in missions. You can also join or create teams, make friends, use voice chat, and play as a police officer or a criminal.
-
A car customization system with many options
-
The game also has a car customization system where you can modify your car to suit your style and preferences. You can adjust the suspension, wheel angle, engine, turbo, gearbox, exhaust, and more. You can also change the visual appearance of your car with dynamic vinyls, car body parts, colors, stickers, etc. You can even customize your license plate with different types and fonts.
-
car parking multiplayer apk yeni sürüm indir
-car parking multiplayer apk yeni sürüm hile
-car parking multiplayer apk yeni sürüm güncelleme
-car parking multiplayer apk yeni sürüm mod
-car parking multiplayer apk yeni sürüm nasıl yüklenir
-car parking multiplayer apk yeni sürüm para hilesi
-car parking multiplayer apk yeni sürüm son versiyon
-car parking multiplayer apk yeni sürüm android oyun club
-car parking multiplayer apk yeni sürüm araba hilesi
-car parking multiplayer apk yeni sürüm oyna
-car parking multiplayer apk yeni sürüm online
-car parking multiplayer apk yeni sürüm türkçe
-car parking multiplayer apk yeni sürüm araba listesi
-car parking multiplayer apk yeni sürüm 2023
-car parking multiplayer apk yeni sürüm kurulumu
-car parking multiplayer apk yeni sürüm açılmıyor
-car parking multiplayer apk yeni sürüm hileli indir
-car parking multiplayer apk yeni sürüm ücretsiz
-car parking multiplayer apk yeni sürüm inceleme
-car parking multiplayer apk yeni sürüm ne zaman çıkacak
-car parking multiplayer apk yeni sürüm harita hilesi
-car parking multiplayer apk yeni sürüm ios
-car parking multiplayer apk yeni sürüm yorumlar
-car parking multiplayer apk yeni sürüm sistem gereksinimleri
-car parking multiplayer apk yeni sürüm hız hilesi
-car parking multiplayer apk yeni sürüm video
-car parking multiplayer apk yeni sürüm araba modifiye
-car parking multiplayer apk yeni sürüm bilgisayara indir
-car parking multiplayer apk yeni sürüm nasıl oynanır
-car parking multiplayer apk yeni sürüm araba satın alma
-car parking multiplayer apk yeni sürüm grafik ayarları
-car parking multiplayer apk yeni sürüm sesli sohbet
-car parking multiplayer apk yeni sürüm polis olma
-car parking multiplayer apk yeni sürüm araba ekleme
-car parking multiplayer apk yeni sürüm drift yapma
-car parking multiplayer apk yeni sürüm kilit açma
-car parking multiplayer apk yeni sürüm canlı yayın
-car parking multiplayer apk yeni sürüm arkadaş ekleme
-car parking multiplayer apk yeni sürüm araba değiştirme
-car parking multiplayer apk yeni sürüm plaka yapma
-car parking multiplayer apk yeni sürüm en iyi araba
-car parking multiplayer apk yeni sürüm rehberi
-car parking multiplayer apk yeni sürüm güncel linki
-car parking multiplayer apk yeni sürüm hata çözümleri
-car parking multiplayer apk yeni sürüm müzik ekleme
-car parking multiplayer apk yeni sürüm kamera açısı değiştirme
-car parking multiplayer apk yeni sürüm park etme oyunu
-
How to download and install Car Parking Multiplayer APK yeni sürüm?
-
If you want to enjoy the latest version of Car Parking Multiplayer with all its features and improvements, you need to download and install the APK file on your device. The APK file is a package file that contains all the data and resources of the game. By installing it manually on your device, you can bypass the restrictions of the Google Play Store or the App Store and get access to the newest version of the game before it is officially released on the stores. Here are the benefits, steps, and precautions of downloading and installing Car Parking Multiplayer APK yeni sürüm:
-
The benefits of downloading the latest version of the game
-
By downloading and installing Car Parking Multiplayer APK yeni sürüm, you can enjoy the following benefits:
-
-
You can play the game with the latest features, such as new cars, maps, modes, missions, events, etc.
-
You can avoid the bugs and glitches that may occur in the older versions of the game.
-
You can save your progress and data on your device without relying on the internet connection.
-
You can update the game easily whenever a new version is available.
-
-
The steps to download and install the APK file on your device
-
To download and install Car Parking Multiplayer APK yeni sürüm on your device, you need to follow these steps:
-
-
Go to a trusted and reliable website that provides the APK file of Car Parking Multiplayer. You can use this link as an example.
-
Click on the download button and wait for the file to be downloaded on your device.
-
Once the file is downloaded, locate it in your file manager and tap on it to start the installation process.
-
If you see a warning message that says "Install blocked" or "Unknown sources", you need to enable the option to install apps from unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store or the App Store.
-
Follow the instructions on the screen and grant the necessary permissions to install the app.
-
After the installation is completed, you can launch the game and enjoy it.
-
-
The precautions to take before installing the APK file
-
Before installing Car Parking Multiplayer APK yeni sürüm on your device, you need to take some precautions to ensure a safe and smooth installation. Here are some tips:
-
-
Make sure that your device has enough storage space to accommodate the APK file and the game data.
-
Make sure that your device meets the minimum requirements to run the game. The game requires Android 5.0 or higher, 1 GB of RAM, and 300 MB of free space.
-
Make sure that you have a stable internet connection to download the APK file and to play the game online.
-
Make sure that you download the APK file from a trusted and reliable source. Avoid downloading from unknown or suspicious websites that may contain malware or viruses.
-
Make sure that you backup your data before installing the APK file. This will help you restore your data in case something goes wrong during the installation process.
-
-
How to play Car Parking Multiplayer?
-
Now that you have downloaded and installed Car Parking Multiplayer APK yeni sürüm on your device, you are ready to play the game. Here are some basic controls and gameplay mechanics that you need to know:
-
The basic controls and gameplay mechanics
-
The game has two modes of control: tilt and buttons. You can choose whichever mode suits you better in the settings menu. The tilt mode allows you to steer your car by tilting your device left or right. The buttons mode allows you to steer your car by tapping on the left or right arrows on the screen. You can also adjust the sensitivity of the controls in the settings menu.
-
The game also has a realistic driving physics system that simulates various aspects of driving, such as acceleration, braking, steering, traction, suspension, etc. You need to pay attention to these factors when parking or driving your car. For example, you need to apply more brake force when parking downhill than uphill. You also need to avoid hitting other cars, pedestrians, walls, or objects that may damage your car or cause accidents.
-
The different modes and challenges in the game
-
The game has several modes and challenges that you can choose from depending on your preference and skill level. Here are some of them:
-
-
Mode/Challenge
Description
-
Parking Mode
This is the main mode of the game where you have to park your car in different scenarios with varying levels of difficulty. You have to follow the arrows and the parking lines to park your car correctly. You have to complete the parking task within the time limit and without damaging your car. You can earn coins and stars based on your performance. You can use the coins to buy new cars or upgrade your existing ones. You can use the stars to unlock new levels and maps.
-
Free Mode
This is the mode where you can free roam in different maps with real gas stations and car services. You can drive your car anywhere you want, explore the environment, and interact with other players. You can also join or create teams, chat with other players, exchange cars, race, or do missions. You can also play as a police officer or a criminal and chase or escape from other players.
-
Race Mode
This is the mode where you can compete against other players in various races. You can choose from different types of races, such as drag, drift, circuit, etc. You can also customize your car with different parts and vinyls to improve its performance and appearance. You can earn coins and reputation points based on your ranking and achievements. You can use the coins to buy new cars or upgrade your existing ones. You can use the reputation points to unlock new races and maps.
-
Mission Mode
This is the mode where you can cooperate with other players in various missions. You can choose from different types of missions, such as delivery, taxi, tow truck, etc. You have to complete the mission objectives within the time limit and without damaging your car or losing your cargo. You can earn coins and experience points based on your performance. You can use the coins to buy new cars or upgrade your existing ones. You can use the experience points to level up and unlock new missions and maps.
-
-
The tips and tricks to master the game
-
Here are some tips and tricks that will help you master Car Parking Multiplayer:
-
-
Practice your parking skills in the parking mode before trying the other modes. This will help you get familiar with the controls and the physics of the game.
-
Use the camera angles wisely to get a better view of your surroundings. You can switch between different camera angles by tapping on the camera icon on the screen. You can also zoom in or out by pinching on the screen.
-
Use the handbrake to make sharp turns or drifts. You can activate the handbrake by tapping on the handbrake icon on the screen. You can also adjust the handbrake sensitivity in the settings menu.
-
Use the indicators to signal your intentions to other players or traffic. You can turn on or off the indicators by tapping on the left or right arrows on the screen.
-
Use the horn to honk at other players or traffic. You can honk by tapping on the horn icon on the screen.
-
Use the headlights to see better at night or in dark areas. You can turn on or off the headlights by tapping on the headlights icon on the screen.
-
Use the map to navigate your way around the maps. You can see your location, destination, and other points of interest on the map. You can also zoom in or out by pinching on the map.
-
Use the garage to customize your car with different parts and vinyls. You can access the garage by tapping on the garage icon on the screen. You can also buy new cars or sell your old ones in the garage.
-
Use the settings menu to adjust the game options to your liking. You can access the settings menu by tapping on the settings icon on the screen. You can change the sound, graphics, language, control, and other options in the settings menu.
-
-
How to enjoy Car Parking Multiplayer with your friends?
-
One of the best features of Car Parking Multiplayer is that you can play it with your friends and have fun together. Here are some ways to enjoy Car Parking Multiplayer with your friends:
-
The features that make the game social and interactive
-
The game has several features that make it social and interactive, such as:
-
-
Voice chat: You can communicate with other players using voice chat. You can use voice chat by tapping on the microphone icon on the screen. You can also mute or unmute other players by tapping on their names on the screen.
-
Text chat: You can communicate with other players using text chat. You can use text chat by tapping on the chat icon on the screen. You can also send emojis or stickers by tapping on the smiley icon on the screen.
-
Friends list: You can add other players as your friends and see their online status. You can add friends by tapping on their names on the screen and selecting "Add friend". You can also accept or reject friend requests by tapping on the notification icon on the screen.
-
Teams: You can join or create teams and play with your teammates. You can join teams by tapping on the team icon on the screen and selecting "Join team". You can also create teams by tapping on the team icon on the screen and selecting "Create team". You can invite or kick teammates, change team name, logo, or color, and chat with your teammates in the team menu.
-
-
The ways to communicate and cooperate with other players
-
The game has several ways to communicate and cooperate with other players, such as:
-
-
Races: You can compete against other players in various races and see who is faster. You can join races by tapping on the race icon on the screen and selecting "Join race". You can also create races by tapping on the race icon on the screen and selecting "Create race". You can choose the type, map, time, weather, and rules of the race in the race menu.
-
Missions: You can cooperate with other players in various missions and see who is more efficient. You can join missions by tapping on the mission icon on the screen and selecting "Join mission". You can also create missions by tapping on the mission icon on the screen and selecting "Create mission". You can choose the type, map, time, weather, and rules of the mission in the mission menu.
-
Events: You can participate in various events and see who is more skilled. You can join events by tapping on the event icon on the screen and selecting "Join event". You can also create events by tapping on the event icon on the screen and selecting "Create event". You can choose the type, map, time, weather, and rules of the event in the event menu.
-
Exchanges: You can exchange cars with other players and see who has a better collection. You can exchange cars by tapping on the exchange icon on the screen and selecting "Exchange car". You can also request or offer cars by tapping on the exchange icon on the screen and selecting "Request car" or "Offer car". You can see the details, ratings, and reviews of the cars in the exchange menu.
-
-
The fun activities and events that you can join or create
-
The game has several fun activities and events that you can join or create, such as:
-
-
Car shows: You can show off your car to other players and see who has a better design. You can join car shows by tapping on the car show icon on the screen and selecting "Join car show". You can also create car shows by tapping on the car show icon on the screen and selecting "Create car show". You can choose the theme, map, time, weather, and rules of the car show in the car show menu.
-
Police chases: You can play as a police officer or a criminal and chase or escape from other players. You can join police chases by tapping on the police chase icon on the screen and selecting "Join police chase". You can also create police chases by tapping on the police chase icon on the screen and selecting "Create police chase". You can choose the map, time, weather, and rules of the police chase in the police chase menu.
-
Stunts: You can perform stunts with your car and see who is more daring. You can join stunts by tapping on the stunt icon on the screen and selecting "Join stunt". You can also create stunts by tapping on the stunt icon on the screen and selecting "Create stunt". You can choose the map, time, weather, and rules of the stunt in the stunt menu.
-
Drifts: You can drift with your car and see who is more skilled. You can join drifts by tapping on the drift icon on the screen and selecting "Join drift". You can also create drifts by tapping on the drift icon on the screen and selecting "Create drift". You can choose the map, time, weather, and rules of the drift in the drift menu.
-
-
Conclusion
-
Car Parking Multiplayer is a game that offers you a realistic and fun parking simulator experience with an open-world multiplayer mode and a car customization system. You can download and install Car Parking Multiplayer APK yeni sürüm on your device to enjoy the latest version of the game with all its features and improvements. You can also play the game with your friends and have fun together in various modes, challenges, activities, and events. Car Parking Multiplayer is a game that will keep you entertained for hours and hours.
-
FAQs
-
Here are some frequently asked questions about Car Parking Multiplayer:
-
-
Q: Is Car Parking Multiplayer free to play?
-
A: Yes, Car Parking Multiplayer is free to play. However, you can buy coins or premium features with real money to enhance your gameplay.
-
Q: Is Car Parking Multiplayer safe to download and install?
-
A: Yes, Car Parking Multiplayer is safe to download and install as long as you use a trusted and reliable source. However, you should always take precautions before installing any APK file on your device.
-
Q: Is Car Parking Multiplayer compatible with my device?
-
A: Car Parking Multiplayer is compatible with most Android devices that run Android 5.0 or higher. However, some devices may have performance issues or compatibility problems depending on their specifications.
-
Q: How can I contact the developers of Car Parking Multiplayer?
-
A: You can contact the developers of Car Parking Multiplayer by sending them an email at olzhass@yandex.com or by visiting their website at https://olzhass.com/.
-
Q: How can I report a bug or a problem in Car Parking Multiplayer?
-
A: You can report a bug or a problem in Car Parking Multiplayer by tapping on the settings icon on the screen and selecting "Report bug". You can also send feedback or suggestions to the developers by tapping on the settings icon on the screen and selecting "Feedback".
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Musik Free Online - The Ultimate Guide to MP3 Music Download.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Musik Free Online - The Ultimate Guide to MP3 Music Download.md
deleted file mode 100644
index 48049bd176a67068c0c8dc3823f9e5e5b932063f..0000000000000000000000000000000000000000
--- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Musik Free Online - The Ultimate Guide to MP3 Music Download.md
+++ /dev/null
@@ -1,97 +0,0 @@
-
-
How to Download Musik for Free and Legally
-
Musik is a form of art that expresses emotions, ideas, and stories through sounds. Musik can entertain, inspire, heal, and educate people. Many people love to listen to musik and want to download it for their personal use. However, not everyone can afford to pay for musik or wants to support the artists who create it. That's why some people look for ways to download musik for free and legally.
Downloading musik for free and legally has its benefits and drawbacks. On one hand, you can enjoy a variety of musik genres, artists, and songs without spending any money or breaking any laws. You can also discover new and independent musik that may not be available on mainstream platforms. On the other hand, you may have to deal with low-quality audio, limited selection, ads, viruses, or ethical issues. You may also miss out on some features or benefits that paid services offer, such as offline listening, playlists, recommendations, or support.
-
Fortunately, there are many sources of free and legal musik downloads that you can choose from. In this article, we will introduce you to four of them: Bandcamp, DatPiff, Free Music Archive, and The Internet Archive. We will explain what they are, how they work, how to use them, and what their pros and cons are. By the end of this article, you will have a better idea of how to download musik for free and legally.
-
Bandcamp
-
Bandcamp is a website that allows artists to upload their musik to the site and set the price for digital downloads, physical albums, and merchandise. Artists can also offer their musik for free or let their fans pay what they want. Bandcamp supports multiple formats, including MP3, AAC, Ogg Vorbis, FLAC, ALAC, WAV, and AIFF.
-
To find and download free musik on Bandcamp, you can search for tags like "free" or "free download". You can also browse through curated collections, specific genres, or trending musik. Once you find something you like, you can click on the "download" button beneath the title. You may have to share your email address with the artist or pay a little more to add the musik to your Bandcamp collection.
-
download musik mp3
-download musik gratis
-download musik online
-download musik video
-download musik youtube
-download musik indonesia
-download musik dj
-download musik terbaru
-download musik klasik
-download musik dangdut
-download musik pop
-download musik rock
-download musik jazz
-download musik rap
-download musik reggae
-download musik country
-download musik edm
-download musik metal
-download musik r&b
-download musik hip hop
-download musik instrumental
-download musik lagu
-download musik remix
-download musik barat
-download musik india
-download musik korea
-download musik jawa
-download musik bali
-download musik sunda
-download musik minang
-download musik anak-anak
-download musik religi
-download musik sholawat
-download musik nasyid
-download musik quran
-download musik gamelan
-download musik angklung
-download musik suling
-download musik piano
-download musik gitar
-download musik biola
-download musik drum
-download musik saxophone
-download musik harmonika
-download musik akustik
-download musik karaoke
-download musik lirik
-download musik cover
-download musik ost
-
The advantages of using Bandcamp are that you can access a wide range of independent and diverse musik genres, artists, and songs. You can also support the artists directly by paying what you want or buying their other products. The disadvantages are that you may not find some mainstream or popular musik on Bandcamp. You may also have to deal with some ads or spam emails from the artists.
-
DatPiff
-
DatPiff is a website that specializes in hip-hop and rap musik since 2005. DatPiff allows users to download and stream free mixtapes that are uploaded by emerging and established artists. DatPiff also features exclusive releases, news, interviews, and reviews related to hip-hop and rap culture. DatPiff supports MP3 format only.
-
To find and download free musik on DatPiff, you can search for keywords, artists, or titles. You can also browse through categories like featured, new, hot, or top. Once you find something you like, you can click on the "download" button next to the title. You may have to create a free account or log in with your social media account to download some mixtapes.
-
The advantages of using DatPiff are that you can access a huge collection of hip-hop and rap musik, including some exclusive and original content. You can also discover new and upcoming artists or follow your favorite ones. The disadvantages are that you may not find other musik genres or styles on DatPiff. You may also have to deal with some ads or pop-ups on the site.
-
Free Music Archive
-
Free Music Archive is a website that offers free and legal musik downloads from various genres, artists, and labels. Free Music Archive was created by WFMU, a non-commercial radio station in New Jersey, in 2009. Free Music Archive collaborates with curators, festivals, museums, and other organizations to showcase high-quality and diverse musik. Free Music Archive supports multiple formats, including MP3, Ogg Vorbis, FLAC, and ALAC.
-
To find and download free musik on Free Music Archive, you can search for keywords, genres, artists, or tracks. You can also browse through charts, genres, curators, or playlists. Once you find something you like, you can click on the "download" button below the title. You do not need to create an account or log in to download musik from Free Music Archive.
-
The advantages of using Free Music Archive are that you can access a large and varied selection of musik genres, artists, and songs. You can also explore different curators and their collections of musik. The disadvantages are that you may not find some mainstream or popular musik on Free Music Archive. You may also have to deal with some low-quality audio or broken links on the site.
-
The Internet Archive
-
The Internet Archive is a website that preserves and provides access to digital content from various sources and formats. The Internet Archive was founded in 1996 by Brewster Kahle, a computer engineer and entrepreneur. The Internet Archive hosts millions of books, videos, images, websites, software, and musik files. The Internet Archive supports multiple formats, including MP3, Ogg Vorbis, FLAC, WAV, and AIFF.
-
To find and download free musik on The Internet Archive, you can search for keywords or browse through collections like audio archive, live music archive, netlabels, or 78 RPMs and cylinder recordings. Once you find something you like, you can click on the "download options" button next to the title. You can choose from different formats and quality levels to download the musik file.
-
The advantages of using The Internet Archive are that you can access a massive and diverse archive of musik files from various sources and eras. You can also discover rare and historical musik recordings that may not be available elsewhere. The disadvantages are that you may not find some recent or current musik on The Internet Archive. You may also have to deal with some outdated or inaccurate metadata or descriptions on the site.
-
Conclusion
-
Downloading musik for free and legally is possible if you know where to look. In this article, we have introduced you to four sources of free and legal musik downloads: Bandcamp, DatPiff, Free Music Archive, and The Internet Archive. We have explained what they are, how they work, how to use them, and what their pros and cons are. We hope that this article has helped you to find the musik that you want to download for free and legally.
-
However, before you start downloading musik from these sources, here are some tips and recommendations that you should keep in mind:
-
-
Always check the license and terms of use of the musik files that you download. Some musik may be free for personal use only, or may require attribution or permission from the artist or the source.
-
Always scan the musik files that you download for viruses or malware. Some websites may contain malicious links or downloads that can harm your device or data.
-
Always respect the rights and wishes of the artists who create the musik. If you like their musik, consider supporting them by buying their albums, merchandise, or tickets, or by following them on social media or streaming platforms.
-
Always be open-minded and curious about new and different musik genres, artists, and songs. You may discover some amazing musik that you would not have heard otherwise.
-
-
Downloading musik for free and legally is a great way to enjoy and explore the world of musik. Whether you are looking for hip-hop, rock, jazz, classical, or anything in between, you can find it on one of these sources. So go ahead and download some musik today and let us know what you think!
-
FAQs
-
Here are some frequently asked questions about downloading musik for free and legally, along with their answers:
-
-
Is it illegal to download musik for free?
-
It depends on the source and the license of the musik file. Some musik files are in the public domain or have a creative commons license that allows free and legal downloading. Some musik files are offered for free by the artists or the sources as a promotion or a gift. However, some musik files are protected by copyright laws and require payment or permission to download. Downloading such musik files without authorization is illegal and can result in legal consequences.
-
What is the best format to download musik?
-
It depends on your preference and your device. Different formats have different advantages and disadvantages in terms of quality, size, compatibility, and features. Some of the most common formats are MP3, AAC, Ogg Vorbis, FLAC, ALAC, WAV, and AIFF. MP3 is the most popular and widely supported format, but its lossy compression means lower audio quality than lossless formats. FLAC and ALAC are lossless formats that preserve the original quality of the musik file, but they produce larger files and may not be compatible with some devices. WAV and AIFF are uncompressed formats that offer high quality at the cost of very large files, and they have only limited support for metadata or tags.
-
How can I download musik from YouTube?
-
You can use a third-party tool or website that allows you to convert YouTube videos into audio files and download them. However, you should be careful when using such tools or websites as they may not be safe, legal, or ethical. You should always check the license and terms of use of the YouTube videos that you want to download. You should also respect the rights and wishes of the YouTube creators who upload the videos.
-
How can I download musik from Spotify?
-
You can use Spotify's offline mode feature to download up to 10,000 songs per device on up to five devices. However, this feature is only available for Spotify Premium subscribers who pay a monthly fee. You also need to connect to the internet at least once every 30 days to keep your downloaded songs. You cannot transfer or play your downloaded songs on other devices or platforms.
-
How can I download musik from SoundCloud?
-
You can use SoundCloud's download feature to download songs that have a "download" button beneath them. However, this feature is only available for songs that the artists or the sources have enabled for downloading. You cannot download songs that do not have a "download" button. You can also use a third-party tool or website that allows you to download songs from SoundCloud. However, you should be careful when using such tools or websites as they may not be safe, legal, or ethical. You should always check the license and terms of use of the SoundCloud songs that you want to download. You should also respect the rights and wishes of the SoundCloud creators who upload the songs.
-
-
\ No newline at end of file
diff --git a/spaces/sklkd93/CodeFormer/CodeFormer/facelib/utils/face_utils.py b/spaces/sklkd93/CodeFormer/CodeFormer/facelib/utils/face_utils.py
deleted file mode 100644
index f1474a2a4419b6b62fab8a919ef805b802556464..0000000000000000000000000000000000000000
--- a/spaces/sklkd93/CodeFormer/CodeFormer/facelib/utils/face_utils.py
+++ /dev/null
@@ -1,248 +0,0 @@
-import cv2
-import numpy as np
-import torch
-
-
-def compute_increased_bbox(bbox, increase_area, preserve_aspect=True):
- left, top, right, bot = bbox
- width = right - left
- height = bot - top
-
- if preserve_aspect:
- width_increase = max(increase_area, ((1 + 2 * increase_area) * height - width) / (2 * width))
- height_increase = max(increase_area, ((1 + 2 * increase_area) * width - height) / (2 * height))
- else:
- width_increase = height_increase = increase_area
- left = int(left - width_increase * width)
- top = int(top - height_increase * height)
- right = int(right + width_increase * width)
- bot = int(bot + height_increase * height)
- return (left, top, right, bot)
-
-
-def get_valid_bboxes(bboxes, h, w):
- left = max(bboxes[0], 0)
- top = max(bboxes[1], 0)
- right = min(bboxes[2], w)
- bottom = min(bboxes[3], h)
- return (left, top, right, bottom)
-
-
-def align_crop_face_landmarks(img,
- landmarks,
- output_size,
- transform_size=None,
- enable_padding=True,
- return_inverse_affine=False,
- shrink_ratio=(1, 1)):
- """Align and crop face with landmarks.
-
- The output_size and transform_size are based on width. The height is
- adjusted based on shrink_ratio_h/shrink_ratio_w.
-
- Modified from:
- https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
-
- Args:
- img (Numpy array): Input image.
- landmarks (Numpy array): 5 or 68 or 98 landmarks.
- output_size (int): Output face size.
- transform_size (int): Transform size. Usually four times
- output_size.
- enable_padding (bool): Whether to pad the image. Default: True.
- shrink_ratio (float | tuple[float] | list[float]): Shrink the whole
- face for height and width (crop a larger area). Default: (1, 1).
-
- Returns:
- (Numpy array): Cropped face.
- """
- lm_type = 'retinaface_5' # Options: dlib_5, retinaface_5
-
- if isinstance(shrink_ratio, (float, int)):
- shrink_ratio = (shrink_ratio, shrink_ratio)
- if transform_size is None:
- transform_size = output_size * 4
-
- # Parse landmarks
- lm = np.array(landmarks)
- if lm.shape[0] == 5 and lm_type == 'retinaface_5':
- eye_left = lm[0]
- eye_right = lm[1]
- mouth_avg = (lm[3] + lm[4]) * 0.5
- elif lm.shape[0] == 5 and lm_type == 'dlib_5':
- lm_eye_left = lm[2:4]
- lm_eye_right = lm[0:2]
- eye_left = np.mean(lm_eye_left, axis=0)
- eye_right = np.mean(lm_eye_right, axis=0)
- mouth_avg = lm[4]
- elif lm.shape[0] == 68:
- lm_eye_left = lm[36:42]
- lm_eye_right = lm[42:48]
- eye_left = np.mean(lm_eye_left, axis=0)
- eye_right = np.mean(lm_eye_right, axis=0)
- mouth_avg = (lm[48] + lm[54]) * 0.5
- elif lm.shape[0] == 98:
- lm_eye_left = lm[60:68]
- lm_eye_right = lm[68:76]
- eye_left = np.mean(lm_eye_left, axis=0)
- eye_right = np.mean(lm_eye_right, axis=0)
- mouth_avg = (lm[76] + lm[82]) * 0.5
-
- eye_avg = (eye_left + eye_right) * 0.5
- eye_to_eye = eye_right - eye_left
- eye_to_mouth = mouth_avg - eye_avg
-
- # Get the oriented crop rectangle
- # x: half width of the oriented crop rectangle
- x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
- # - np.flipud(eye_to_mouth) * [-1, 1]: rotate 90 clockwise
- # norm with the hypotenuse: get the direction
- x /= np.hypot(*x) # get the hypotenuse of a right triangle
- rect_scale = 1 # TODO: you can edit it to get larger rect
- x *= max(np.hypot(*eye_to_eye) * 2.0 * rect_scale, np.hypot(*eye_to_mouth) * 1.8 * rect_scale)
- # y: half height of the oriented crop rectangle
- y = np.flipud(x) * [-1, 1]
-
- x *= shrink_ratio[1] # width
- y *= shrink_ratio[0] # height
-
- # c: center
- c = eye_avg + eye_to_mouth * 0.1
- # quad: (left_top, left_bottom, right_bottom, right_top)
- quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
- # qsize: side length of the square
- qsize = np.hypot(*x) * 2
-
- quad_ori = np.copy(quad)
- # Shrink, for large face
- # TODO: do we really need shrink
- shrink = int(np.floor(qsize / output_size * 0.5))
- if shrink > 1:
- h, w = img.shape[0:2]
- rsize = (int(np.rint(float(w) / shrink)), int(np.rint(float(h) / shrink)))
- img = cv2.resize(img, rsize, interpolation=cv2.INTER_AREA)
- quad /= shrink
- qsize /= shrink
-
- # Crop
- h, w = img.shape[0:2]
- border = max(int(np.rint(qsize * 0.1)), 3)
- crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
- int(np.ceil(max(quad[:, 1]))))
- crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, w), min(crop[3] + border, h))
- if crop[2] - crop[0] < w or crop[3] - crop[1] < h:
- img = img[crop[1]:crop[3], crop[0]:crop[2], :]
- quad -= crop[0:2]
-
- # Pad
- # pad: (width_left, height_top, width_right, height_bottom)
- h, w = img.shape[0:2]
- pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
- int(np.ceil(max(quad[:, 1]))))
- pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - w + border, 0), max(pad[3] - h + border, 0))
- if enable_padding and max(pad) > border - 4:
- pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
- img = np.pad(img, ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
- h, w = img.shape[0:2]
- y, x, _ = np.ogrid[:h, :w, :1]
- mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0],
- np.float32(w - 1 - x) / pad[2]),
- 1.0 - np.minimum(np.float32(y) / pad[1],
- np.float32(h - 1 - y) / pad[3]))
- blur = int(qsize * 0.02)
- if blur % 2 == 0:
- blur += 1
- blur_img = cv2.boxFilter(img, 0, ksize=(blur, blur))
-
- img = img.astype('float32')
- img += (blur_img - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
- img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
- img = np.clip(img, 0, 255) # float32, [0, 255]
- quad += pad[:2]
-
- # Transform use cv2
- h_ratio = shrink_ratio[0] / shrink_ratio[1]
- dst_h, dst_w = int(transform_size * h_ratio), transform_size
- template = np.array([[0, 0], [0, dst_h], [dst_w, dst_h], [dst_w, 0]])
- # use cv2.LMEDS method for the equivalence to skimage transform
- # ref: https://blog.csdn.net/yichxi/article/details/115827338
- affine_matrix = cv2.estimateAffinePartial2D(quad, template, method=cv2.LMEDS)[0]
- cropped_face = cv2.warpAffine(
- img, affine_matrix, (dst_w, dst_h), borderMode=cv2.BORDER_CONSTANT, borderValue=(135, 133, 132)) # gray
-
- if output_size < transform_size:
- cropped_face = cv2.resize(
- cropped_face, (output_size, int(output_size * h_ratio)), interpolation=cv2.INTER_LINEAR)
-
- if return_inverse_affine:
- dst_h, dst_w = int(output_size * h_ratio), output_size
- template = np.array([[0, 0], [0, dst_h], [dst_w, dst_h], [dst_w, 0]])
- # use cv2.LMEDS method for the equivalence to skimage transform
- # ref: https://blog.csdn.net/yichxi/article/details/115827338
- affine_matrix = cv2.estimateAffinePartial2D(
- quad_ori, np.array([[0, 0], [0, output_size], [dst_w, dst_h], [dst_w, 0]]), method=cv2.LMEDS)[0]
- inverse_affine = cv2.invertAffineTransform(affine_matrix)
- else:
- inverse_affine = None
- return cropped_face, inverse_affine
-
-
-def paste_face_back(img, face, inverse_affine):
- h, w = img.shape[0:2]
- face_h, face_w = face.shape[0:2]
- inv_restored = cv2.warpAffine(face, inverse_affine, (w, h))
- mask = np.ones((face_h, face_w, 3), dtype=np.float32)
- inv_mask = cv2.warpAffine(mask, inverse_affine, (w, h))
- # remove the black borders
- inv_mask_erosion = cv2.erode(inv_mask, np.ones((2, 2), np.uint8))
- inv_restored_remove_border = inv_mask_erosion * inv_restored
- total_face_area = np.sum(inv_mask_erosion) // 3
- # compute the fusion edge based on the area of face
- w_edge = int(total_face_area**0.5) // 20
- erosion_radius = w_edge * 2
- inv_mask_center = cv2.erode(inv_mask_erosion, np.ones((erosion_radius, erosion_radius), np.uint8))
- blur_size = w_edge * 2
- inv_soft_mask = cv2.GaussianBlur(inv_mask_center, (blur_size + 1, blur_size + 1), 0)
- img = inv_soft_mask * inv_restored_remove_border + (1 - inv_soft_mask) * img
- # float32, [0, 255]
- return img
-
-
-if __name__ == '__main__':
- import os
-
- from facelib.detection import init_detection_model
- from facelib.utils.face_restoration_helper import get_largest_face
-
- img_path = '/home/wxt/datasets/ffhq/ffhq_wild/00009.png'
- img_name = os.path.splitext(os.path.basename(img_path))[0]
-
- # initialize model
- det_net = init_detection_model('retinaface_resnet50', half=False)
- img_ori = cv2.imread(img_path)
- h, w = img_ori.shape[0:2]
- # if larger than 800, scale it
- scale = max(h / 800, w / 800)
- if scale > 1:
- img = cv2.resize(img_ori, (int(w / scale), int(h / scale)), interpolation=cv2.INTER_LINEAR)
- else:
- img = img_ori  # keep the original resolution so `img` is always defined
-
- with torch.no_grad():
- bboxes = det_net.detect_faces(img, 0.97)
- if scale > 1:
- bboxes *= scale # the score is incorrect
- bboxes = get_largest_face(bboxes, h, w)[0]
-
- landmarks = np.array([[bboxes[i], bboxes[i + 1]] for i in range(5, 15, 2)])
-
- cropped_face, inverse_affine = align_crop_face_landmarks(
- img_ori,
- landmarks,
- output_size=512,
- transform_size=None,
- enable_padding=True,
- return_inverse_affine=True,
- shrink_ratio=(1, 1))
-
- cv2.imwrite(f'tmp/{img_name}_cropped_face.png', cropped_face)
- img = paste_face_back(img_ori, cropped_face, inverse_affine)
- cv2.imwrite(f'tmp/{img_name}_back.png', img)
diff --git a/spaces/sky009/Qiliang-bart-large-cnn-samsum-ChatGPT_v3/README.md b/spaces/sky009/Qiliang-bart-large-cnn-samsum-ChatGPT_v3/README.md
deleted file mode 100644
index 8d336892950f31717a26be7a892dfcb337320633..0000000000000000000000000000000000000000
--- a/spaces/sky009/Qiliang-bart-large-cnn-samsum-ChatGPT_v3/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Qiliang-bart-large-cnn-samsum-ChatGPT V3
-emoji: 📊
-colorFrom: yellow
-colorTo: red
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/sophiaaez/BLIPvOFAde/app.py b/spaces/sophiaaez/BLIPvOFAde/app.py
deleted file mode 100644
index 11da84b398fc6caf381e9b53c7894c33a8f8792f..0000000000000000000000000000000000000000
--- a/spaces/sophiaaez/BLIPvOFAde/app.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import numpy as np
-import requests
-import gradio as gr
-from transformers import pipeline
-
-translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-de")
-
-def blipofasinki(input_img):
- b64_string = gr.processing_utils.encode_url_or_file_to_base64(input_img)
- #blip-Nucleus
- responsen = requests.post(url='https://hf.space/embed/Salesforce/BLIP/+/api/predict/', json={"data": [ b64_string,"Image Captioning","None",str('Nucleus sampling')]})
- jresn = responsen.json()
- capn = jresn["data"][0]
- offset = len(str("caption:"))
- capn = capn[offset:]
- trans_capn = translator(capn)
- tcn = trans_capn[0]['translation_text']
- #blip-beam
- responseb = requests.post(url='https://hf.space/embed/Salesforce/BLIP/+/api/predict/', json={"data": [ b64_string,"Image Captioning","None",str('Beam search')]})
- jresb = responseb.json()
- capb = jresb["data"][0]
- capb = capb[offset:]
- trans_capb = translator(capb)
- tcb = trans_capb[0]['translation_text']
- #ofa
- responseo = requests.post(url='https://hf.space/embed/OFA-Sys/OFA-Image_Caption/+/api/predict/', json={"data": [b64_string]})
- jreso = responseo.json()
- capo = jreso["data"][0]
- trans_capo = translator(capo)
- tco = trans_capo[0]['translation_text']
- return [tcn, tcb, tco]
-
-
-description = "A direct comparison in image captioning between BLIP and OFA (in German translated with Helsinki)."
-
-input_ = [gr.inputs.Image(type='filepath', label="Input Image")]
-
-output_ = [gr.outputs.Textbox(label="BLIP Nucleus sampling output"),gr.outputs.Textbox(label="BLIP Beam search output"), gr.outputs.Textbox(label="OFA output")]
-
-iface = gr.Interface(blipofasinki, input_, output_, description=description)
-
-iface.launch(debug=True,show_error=True)
\ No newline at end of file
diff --git a/spaces/spock74/whisper-webui/app-shared.py b/spaces/spock74/whisper-webui/app-shared.py
deleted file mode 100644
index 541459b104ce89c56845ac177365f49a61445d04..0000000000000000000000000000000000000000
--- a/spaces/spock74/whisper-webui/app-shared.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# Run the app with no audio file restrictions
-from app import create_ui
-create_ui(-1, share=True)
\ No newline at end of file
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/utils/wer_utils.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/utils/wer_utils.py
deleted file mode 100644
index cf6f3d09ba41a46ad4d7968fb3c286dd53d15c38..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/utils/wer_utils.py
+++ /dev/null
@@ -1,381 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from __future__ import absolute_import, division, print_function, unicode_literals
-
-import re
-from collections import deque
-from enum import Enum
-
-import numpy as np
-
-
-"""
- Utility modules for computation of Word Error Rate,
- Alignments, as well as more granular metrics like
- deletions, insertions and substitutions.
-"""
-
-
-class Code(Enum):
- match = 1
- substitution = 2
- insertion = 3
- deletion = 4
-
-
-class Token(object):
- def __init__(self, lbl="", st=np.nan, en=np.nan):
- if np.isnan(st):
- self.label, self.start, self.end = "", 0.0, 0.0
- else:
- self.label, self.start, self.end = lbl, st, en
-
-
-class AlignmentResult(object):
- def __init__(self, refs, hyps, codes, score):
- self.refs = refs # std::deque
- self.hyps = hyps # std::deque
- self.codes = codes # std::deque
- self.score = score # float
-
-
-def coordinate_to_offset(row, col, ncols):
- return int(row * ncols + col)
-
-
-def offset_to_row(offset, ncols):
- return int(offset / ncols)
-
-
-def offset_to_col(offset, ncols):
- return int(offset % ncols)
-
-
-def trimWhitespace(str):
- return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str)))
-
-
-def str2toks(str):
- pieces = trimWhitespace(str).split(" ")
- toks = []
- for p in pieces:
- toks.append(Token(p, 0.0, 0.0))
- return toks
-
-
-class EditDistance(object):
- def __init__(self, time_mediated):
- self.time_mediated_ = time_mediated
- self.scores_ = np.nan # Eigen::Matrix
- self.backtraces_ = (
- np.nan
- ) # Eigen::Matrix backtraces_;
- self.confusion_pairs_ = {}
-
- def cost(self, ref, hyp, code):
- if self.time_mediated_:
- if code == Code.match:
- return abs(ref.start - hyp.start) + abs(ref.end - hyp.end)
- elif code == Code.insertion:
- return hyp.end - hyp.start
- elif code == Code.deletion:
- return ref.end - ref.start
- else: # substitution
- return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1
- else:
- if code == Code.match:
- return 0
- elif code == Code.insertion or code == Code.deletion:
- return 3
- else: # substitution
- return 4
-
- def get_result(self, refs, hyps):
- res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan)
-
- num_rows, num_cols = self.scores_.shape
- res.score = self.scores_[num_rows - 1, num_cols - 1]
-
- curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols)
-
- while curr_offset != 0:
- curr_row = offset_to_row(curr_offset, num_cols)
- curr_col = offset_to_col(curr_offset, num_cols)
-
- prev_offset = self.backtraces_[curr_row, curr_col]
-
- prev_row = offset_to_row(prev_offset, num_cols)
- prev_col = offset_to_col(prev_offset, num_cols)
-
- res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++
- res.hyps.appendleft(curr_col - 1)
- if curr_row - 1 == prev_row and curr_col == prev_col:
- res.codes.appendleft(Code.deletion)
- elif curr_row == prev_row and curr_col - 1 == prev_col:
- res.codes.appendleft(Code.insertion)
- else:
- # assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col)
- ref_str = refs[res.refs[0]].label
- hyp_str = hyps[res.hyps[0]].label
-
- if ref_str == hyp_str:
- res.codes.appendleft(Code.match)
- else:
- res.codes.appendleft(Code.substitution)
-
- confusion_pair = "%s -> %s" % (ref_str, hyp_str)
- if confusion_pair not in self.confusion_pairs_:
- self.confusion_pairs_[confusion_pair] = 1
- else:
- self.confusion_pairs_[confusion_pair] += 1
-
- curr_offset = prev_offset
-
- return res
-
- def align(self, refs, hyps):
- if len(refs) == 0 and len(hyps) == 0:
- return np.nan
-
- # NOTE: we're not resetting the values in these matrices because every value
- # will be overridden in the loop below. If this assumption doesn't hold,
- # be sure to set all entries in self.scores_ and self.backtraces_ to 0.
- self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1))
- self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1))
-
- num_rows, num_cols = self.scores_.shape
-
- for i in range(num_rows):
- for j in range(num_cols):
- if i == 0 and j == 0:
- self.scores_[i, j] = 0.0
- self.backtraces_[i, j] = 0
- continue
-
- if i == 0:
- self.scores_[i, j] = self.scores_[i, j - 1] + self.cost(
- None, hyps[j - 1], Code.insertion
- )
- self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols)
- continue
-
- if j == 0:
- self.scores_[i, j] = self.scores_[i - 1, j] + self.cost(
- refs[i - 1], None, Code.deletion
- )
- self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols)
- continue
-
- # Below here both i and j are greater than 0
- ref = refs[i - 1]
- hyp = hyps[j - 1]
- best_score = self.scores_[i - 1, j - 1] + (
- self.cost(ref, hyp, Code.match)
- if (ref.label == hyp.label)
- else self.cost(ref, hyp, Code.substitution)
- )
-
- prev_row = i - 1
- prev_col = j - 1
- ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion)
- if ins < best_score:
- best_score = ins
- prev_row = i
- prev_col = j - 1
-
- delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion)
- if delt < best_score:
- best_score = delt
- prev_row = i - 1
- prev_col = j
-
- self.scores_[i, j] = best_score
- self.backtraces_[i, j] = coordinate_to_offset(
- prev_row, prev_col, num_cols
- )
-
- return self.get_result(refs, hyps)
-
-
-class WERTransformer(object):
- def __init__(self, hyp_str, ref_str, verbose=True):
- self.ed_ = EditDistance(False)
- self.id2oracle_errs_ = {}
- self.utts_ = 0
- self.words_ = 0
- self.insertions_ = 0
- self.deletions_ = 0
- self.substitutions_ = 0
-
- self.process(["dummy_str", hyp_str, ref_str])
-
- if verbose:
- print("'%s' vs '%s'" % (hyp_str, ref_str))
- self.report_result()
-
- def process(self, input): # std::vector&& input
- if len(input) < 3:
- print(
- "Input must be of the form ... , got ",
- len(input),
- " inputs:",
- )
- return None
-
- # Align
- # std::vector hyps;
- # std::vector refs;
-
- hyps = str2toks(input[-2])
- refs = str2toks(input[-1])
-
- alignment = self.ed_.align(refs, hyps)
- if alignment is None:
- print("Alignment is null")
- return np.nan
-
- # Tally errors
- ins = 0
- dels = 0
- subs = 0
- for code in alignment.codes:
- if code == Code.substitution:
- subs += 1
- elif code == Code.insertion:
- ins += 1
- elif code == Code.deletion:
- dels += 1
-
- # Output
- row = input
- row.append(str(len(refs)))
- row.append(str(ins))
- row.append(str(dels))
- row.append(str(subs))
- # print(row)
-
- # Accumulate
- kIdIndex = 0
- kNBestSep = "/"
-
- pieces = input[kIdIndex].split(kNBestSep)
-
- if len(pieces) == 0:
- print(
- "Error splitting ",
- input[kIdIndex],
- " on '",
- kNBestSep,
- "', got empty list",
- )
- return np.nan
-
- id = pieces[0]
- if id not in self.id2oracle_errs_:
- self.utts_ += 1
- self.words_ += len(refs)
- self.insertions_ += ins
- self.deletions_ += dels
- self.substitutions_ += subs
- self.id2oracle_errs_[id] = [ins, dels, subs]
- else:
- curr_err = ins + dels + subs
- prev_err = np.sum(self.id2oracle_errs_[id])
- if curr_err < prev_err:
- self.id2oracle_errs_[id] = [ins, dels, subs]
-
- return 0
-
- def report_result(self):
- # print("---------- Summary ---------------")
- if self.words_ == 0:
- print("No words counted")
- return
-
- # 1-best
- best_wer = (
- 100.0
- * (self.insertions_ + self.deletions_ + self.substitutions_)
- / self.words_
- )
-
- print(
- "\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, "
- "%0.2f%% dels, %0.2f%% subs)"
- % (
- best_wer,
- self.utts_,
- self.words_,
- 100.0 * self.insertions_ / self.words_,
- 100.0 * self.deletions_ / self.words_,
- 100.0 * self.substitutions_ / self.words_,
- )
- )
-
- def wer(self):
- if self.words_ == 0:
- wer = np.nan
- else:
- wer = (
- 100.0
- * (self.insertions_ + self.deletions_ + self.substitutions_)
- / self.words_
- )
- return wer
-
- def stats(self):
- if self.words_ == 0:
- stats = {}
- else:
- wer = (
- 100.0
- * (self.insertions_ + self.deletions_ + self.substitutions_)
- / self.words_
- )
- stats = dict(
- {
- "wer": wer,
- "utts": self.utts_,
- "numwords": self.words_,
- "ins": self.insertions_,
- "dels": self.deletions_,
- "subs": self.substitutions_,
- "confusion_pairs": self.ed_.confusion_pairs_,
- }
- )
- return stats
-
-
-def calc_wer(hyp_str, ref_str):
- t = WERTransformer(hyp_str, ref_str, verbose=0)
- return t.wer()
-
-
-def calc_wer_stats(hyp_str, ref_str):
- t = WERTransformer(hyp_str, ref_str, verbose=0)
- return t.stats()
-
-
-def get_wer_alignment_codes(hyp_str, ref_str):
- """
- INPUT: hypothesis string, reference string
- OUTPUT: List of alignment codes (intermediate results from WER computation)
- """
- t = WERTransformer(hyp_str, ref_str, verbose=0)
- return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes
-
-
-def merge_counts(x, y):
- # Merge two hashes which have 'counts' as their values
- # This can be used for example to merge confusion pair counts
- # conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs'])
- for k, v in y.items():
- if k not in x:
- x[k] = 0
- x[k] += v
- return x
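For orientation, here is a minimal usage sketch of the public helpers defined in the file above (`calc_wer`, `calc_wer_stats`, `get_wer_alignment_codes`). The import path is an assumption derived from the file location shown in this diff, and the example strings and the WER figure in the comments are illustrative only.

```python
# Hypothetical usage sketch for the WER utilities deleted above; the import
# path mirrors the file location in this diff and is an assumption.
from examples.speech_recognition.utils.wer_utils import (
    calc_wer,
    calc_wer_stats,
    get_wer_alignment_codes,
)

ref = "the cat sat on the mat"  # reference transcript (6 words)
hyp = "the cat sat on mat"      # hypothesis with one word deleted

print(calc_wer(hyp, ref))                 # WER in percent: one deletion over six words ~= 16.67
print(calc_wer_stats(hyp, ref))           # dict with wer, utts, numwords, ins, dels, subs, confusion_pairs
print(get_wer_alignment_codes(hyp, ref))  # deque of Code values (match/substitution/insertion/deletion)
```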
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/adafactor.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/adafactor.py
deleted file mode 100644
index c969b9fbc0d229a25f2046ec67c53c57a433814b..0000000000000000000000000000000000000000
--- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/optim/adafactor.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-import torch.optim
-
-from . import LegacyFairseqOptimizer, register_optimizer
-
-
-@register_optimizer("adafactor")
-class FairseqAdafactor(LegacyFairseqOptimizer):
- def __init__(self, args, params):
- super().__init__(args)
- self._optimizer = Adafactor(params, **self.optimizer_config)
-
- @staticmethod
- def add_args(parser):
- """Add optimizer-specific arguments to the parser."""
- # fmt: off
- parser.add_argument('--adafactor-eps', default='(1e-30, 1e-3)', metavar="E",
- help='epsilons for Adafactor optimizer')
- parser.add_argument('--clip-threshold', type=float, default=1.0, metavar="C",
- help='threshold for clipping update root mean square')
- parser.add_argument('--decay-rate', type=float, default=-0.8, metavar="D",
- help='decay rate of the second moment estimator')
- parser.add_argument('--beta1', type=float, default=None, metavar="B",
- help='beta for first moment estimator. Optional')
- parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
- help='weight decay')
- parser.add_argument('--scale-parameter', action='store_true',
- help='scale learning rate by root mean square of parameter')
- parser.add_argument('--relative-step', action='store_true',
- help='set learning rate to inverse square root of timestep,'
- 'otherwise use external learning rate')
- parser.add_argument('--warmup-init', action='store_true',
- help='use relative step for warm-up learning rate schedule')
- # fmt: on
-
- @property
- def optimizer_config(self):
- """
- Return a kwarg dictionary that will be used to override optimizer
- args stored in checkpoints. This allows us to load a checkpoint and
- resume training using a different set of optimizer args, e.g., with a
- different learning rate.
- Note : Convergence issues empirically observed with fp16 on.
- Might require search for appropriate configuration.
- """
- return {
- "lr": self.args.lr[0],
- "eps": eval(self.args.adafactor_eps),
- "clip_threshold": self.args.clip_threshold,
- "decay_rate": self.args.decay_rate,
- "beta1": self.args.beta1,
- "weight_decay": self.args.weight_decay,
- "scale_parameter": self.args.scale_parameter, # defaults to False
- "relative_step": self.args.relative_step, # defaults to False
- "warmup_init": self.args.warmup_init,
- }
-
-
-class Adafactor(torch.optim.Optimizer):
- """Implements Adafactor algorithm.
-
- This implementation is based on:
- `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
- (see https://arxiv.org/abs/1804.04235)
-
- Note that this optimizer internally adjusts the learning rate
- depending on the *scale_parameter*, *relative_step* and
- *warmup_init* options. To use a manual (external) learning rate
- schedule you should set `scale_parameter=False` and
- `relative_step=False`.
-
- Args:
- params (iterable): iterable of parameters to optimize or dicts defining
- parameter groups
- lr (float, optional): external learning rate (default: None)
- eps (tuple[float, float]): regularization constants for square gradient
- and parameter scale respectively (default: (1e-30, 1e-3))
- clip_threshold (float): threshold of root mean square of
- final gradient update (default: 1.0)
- decay_rate (float): coefficient used to compute running averages of square
- gradient (default: -0.8)
- beta1 (float): coefficient used for computing running averages of gradient
- (default: None)
- weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
- scale_parameter (bool): if True, learning rate is scaled by root mean square of
- parameter (default: True)
- relative_step (bool): if True, time-dependent learning rate is computed
- instead of external learning rate (default: True)
- warmup_init (bool): time-dependent learning rate computation depends on
- whether warm-up initialization is being used (default: False)
- """
-
- def __init__(
- self,
- params,
- lr=None,
- eps=(1e-30, 1e-3),
- clip_threshold=1.0,
- decay_rate=-0.8,
- beta1=None,
- weight_decay=0.0,
- scale_parameter=True,
- relative_step=True,
- warmup_init=False,
- ):
- if lr is not None and relative_step:
- raise ValueError("Cannot combine manual lr and relative_step options")
- if warmup_init and not relative_step:
- raise ValueError("warmup_init requires relative_step=True")
-
- defaults = dict(
- lr=lr,
- eps=eps,
- clip_threshold=clip_threshold,
- decay_rate=decay_rate,
- beta1=beta1,
- weight_decay=weight_decay,
- scale_parameter=scale_parameter,
- relative_step=relative_step,
- warmup_init=warmup_init,
- )
- super(Adafactor, self).__init__(params, defaults)
-
- @property
- def supports_memory_efficient_fp16(self):
- return True
-
- @property
- def supports_flat_params(self):
- return False
-
- def _get_lr(self, param_group, param_state):
- rel_step_sz = param_group["lr"]
- if param_group["relative_step"]:
- min_step = (
- 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
- )
- rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
- param_scale = 1.0
- if param_group["scale_parameter"]:
- param_scale = max(param_group["eps"][1], param_state["RMS"])
- return param_scale * rel_step_sz
-
- def _get_options(self, param_group, param_shape):
- factored = len(param_shape) >= 2
- use_first_moment = param_group["beta1"] is not None
- return factored, use_first_moment
-
- def _rms(self, tensor):
- return tensor.norm(2) / (tensor.numel() ** 0.5)
-
- def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
- r_factor = (
- (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True))
- .rsqrt_()
- .unsqueeze(-1)
- )
- c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
- return torch.mul(r_factor, c_factor)
-
- def step(self, closure=None):
- """Performs a single optimization step.
-
- Args:
- closure (callable, optional): A closure that reevaluates the model
- and returns the loss.
- """
- loss = None
- if closure is not None:
- loss = closure()
-
- for group in self.param_groups:
- for p in group["params"]:
- if p.grad is None:
- continue
- grad = p.grad.data
- if grad.dtype in {torch.float16, torch.bfloat16}:
- grad = grad.float()
- if grad.is_sparse:
- raise RuntimeError("Adafactor does not support sparse gradients.")
-
- state = self.state[p]
- grad_shape = grad.shape
-
- factored, use_first_moment = self._get_options(group, grad_shape)
- # State Initialization
- if len(state) == 0:
- state["step"] = 0
-
- if use_first_moment:
- # Exponential moving average of gradient values
- state["exp_avg"] = torch.zeros_like(grad)
- if factored:
- state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
- state["exp_avg_sq_col"] = torch.zeros(
- grad_shape[:-2] + grad_shape[-1:]
- ).to(grad)
- else:
- state["exp_avg_sq"] = torch.zeros_like(grad)
-
- state["RMS"] = 0
- else:
- if use_first_moment:
- state["exp_avg"] = state["exp_avg"].to(grad)
- if factored:
- state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
- state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
- else:
- state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
-
- p_data_fp32 = p.data
- if p.data.dtype in {torch.float16, torch.bfloat16}:
- p_data_fp32 = p_data_fp32.float()
-
- state["step"] += 1
- state["RMS"] = self._rms(p_data_fp32)
- group["lr"] = self._get_lr(group, state)
-
- beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
- update = (grad ** 2) + group["eps"][0]
- if factored:
- exp_avg_sq_row = state["exp_avg_sq_row"]
- exp_avg_sq_col = state["exp_avg_sq_col"]
-
- exp_avg_sq_row.mul_(beta2t).add_(
- update.mean(dim=-1), alpha=1.0 - beta2t
- )
- exp_avg_sq_col.mul_(beta2t).add_(
- update.mean(dim=-2), alpha=1.0 - beta2t
- )
-
- # Approximation of exponential moving average of square of gradient
- update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
- update.mul_(grad)
- else:
- exp_avg_sq = state["exp_avg_sq"]
-
- exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t)
- update = exp_avg_sq.rsqrt().mul_(grad)
-
- update.div_(
- (self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)
- )
- update.mul_(group["lr"])
-
- if use_first_moment:
- exp_avg = state["exp_avg"]
- exp_avg.mul_(group["beta1"]).add_(update, alpha=1 - group["beta1"])
- update = exp_avg
-
- if group["weight_decay"] != 0:
- p_data_fp32.add_(
- p_data_fp32, alpha=-group["weight_decay"] * group["lr"]
- )
-
- p_data_fp32.add_(-update)
-
- if p.data.dtype in {torch.float16, torch.bfloat16}:
- p.data.copy_(p_data_fp32)
-
- return loss
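To make the note in the `Adafactor` docstring above concrete (a manual, external learning rate requires `relative_step=False` and `scale_parameter=False`), here is a minimal, hedged sketch that drives the standalone optimizer with a tiny PyTorch module; the model, data, and single training step are illustrative only.

```python
import torch

# Assumes the Adafactor class above is importable under the fairseq package
# layout shown in this diff.
from fairseq.optim.adafactor import Adafactor

model = torch.nn.Linear(10, 2)

# Manual (external) learning rate: relative_step must be False (passing lr with
# relative_step=True raises ValueError), and scale_parameter=False keeps the lr
# from being rescaled by the parameter RMS.
optimizer = Adafactor(
    model.parameters(),
    lr=1e-3,
    relative_step=False,
    scale_parameter=False,
)

x = torch.randn(4, 10)
loss = model(x).pow(2).mean()  # dummy objective
loss.backward()
optimizer.step()   # factored second-moment update for the 2x10 weight matrix
optimizer.zero_grad()
```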
diff --git a/spaces/steysie/sc_whisper/app.py b/spaces/steysie/sc_whisper/app.py
deleted file mode 100644
index d15b6607f979e47fd2d5a6a639708e739b96a8c7..0000000000000000000000000000000000000000
--- a/spaces/steysie/sc_whisper/app.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import gradio as gr
-import os
-# import sys
-import subprocess
-
-import whisper
-from whisper.utils import write_vtt
-
-model = whisper.load_model("base")
-
-title = "Add multilingual text/caption to your video"
-
-
-def video2mp3(video_file, output_ext="mp3"):
- filename, ext = os.path.splitext(video_file)
- subprocess.call(["ffmpeg", "-y", "-i", video_file, f"{filename}.{output_ext}"],
- stdout=subprocess.DEVNULL,
- stderr=subprocess.STDOUT)
- return f"{filename}.{output_ext}"
-
-
-def transcribe(input_video):
- audio_file = video2mp3(input_video)
-
- result = model.transcribe(audio_file)
-
- output_dir = ''
- audio_path = audio_file.split(".")[0]
-
- with open(os.path.join(output_dir, audio_path + ".vtt"), "w") as vtt:
- write_vtt(result["segments"], file=vtt)
-
- subtitle = audio_path + ".vtt"
- output_video = audio_path + "_subtitled.mp4"
-
- os.system(f"ffmpeg -i {input_video} -vf subtitles={subtitle} {output_video}")
-
- return output_video
-
-
-block = gr.Blocks()
-with block:
- with gr.Group():
- with gr.Box():
- with gr.Row().style():
- inp_video = gr.Video(
- label="Input Video",
- type="filepath",
- mirror_webcam=False
- )
- op_video = gr.Video()
- btn = gr.Button("Generate Subtitle Video")
-
- btn.click(transcribe, inputs=[inp_video], outputs=[op_video])
-
-block.launch(enable_queue=True)
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Adobe Photoshop For Mac.md b/spaces/stomexserde/gpt4-ui/Examples/Adobe Photoshop For Mac.md
deleted file mode 100644
index a4ea5f2410c543e8d85df29e680d57be57cd2b55..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Adobe Photoshop For Mac.md
+++ /dev/null
@@ -1,32 +0,0 @@
-
-
How to Get Adobe Photoshop for Mac: A Complete Guide
-
Adobe Photoshop is one of the most popular and powerful photo editing and graphic design software in the world. Whether you want to create stunning images, retouch old photos, design logos, flyers, banners, or posters, Photoshop can help you turn your ideas into reality.
-
But how can you get Adobe Photoshop for Mac? Is there a free version of Photoshop for Mac? How much does it cost to use Photoshop on Mac? In this article, we will answer these questions and more, so you can decide which option is best for you.
Option 1: Get a Free Trial of Photoshop for Mac
-
If you want to try Photoshop for Mac before you buy it, you can get a 7-day free trial from Adobe's website[^1^]. Here's how to do it:
-
-
Click the Start Free Trial button on Adobe's website.
-
Sign in or set up your Adobe ID and download your free trial.
-
After your 7-day free trial ends, your Adobe Creative Cloud membership will continue, unless canceled before free trial ends.
-
-
The free trial includes the full version of Photoshop for Mac, as well as access to Adobe Express, a web-based tool for fast and easy image edits. You can also use Photoshop on your iPad with the same account.
-
Option 2: Get a Paid Subscription of Photoshop for Mac
-
If you want to use Photoshop for Mac beyond the free trial period, you will need to get a paid subscription from Adobe. There are different plans available depending on your needs and budget. Here are some of the options:
-
-
Photoshop Single App: This plan gives you Photoshop on desktop and iPad as part of Creative Cloud. It costs US$20.99/mo[^2^].
-
Creative Cloud All Apps: This plan gives you 20+ Creative Cloud apps, including Photoshop on desktop and iPad, as well as Illustrator, InDesign, Premiere Pro, Lightroom, Acrobat Pro, and more. It costs US$54.99/mo[^2^].
-
Students and Teachers: This plan gives you the same benefits as the Creative Cloud All Apps plan, but at a discounted price of US$19.99/mo for the first year[^2^]. You will need to verify your eligibility with Adobe to get this offer.
-
-
All these plans include 100GB of cloud storage, free mobile apps, plugins, file sharing features, step-by-step tutorials, easy updates, and more. You can also cancel your subscription at any time without any fees.
-
Option 3: Get an Alternative to Photoshop for Mac
-
If you don't want to pay for Photoshop for Mac, or if you are looking for a simpler or more specialized tool, you might want to consider some alternatives to Photoshop for Mac. There are many other photo editing and graphic design software available for Mac users, some of which are free or cheaper than Photoshop. Here are some examples:
-
-
GIMP: GIMP is a free and open-source image editor that offers many features similar to Photoshop. It supports layers, masks, filters, brushes, plugins, and more. You can download GIMP from its website.
-
Affinity Photo: Affinity Photo is a professional photo editing software that rivals Photoshop in terms of performance and quality. It supports RAW editing, HDR merging, panorama stitching, layer editing, retouching tools, and more. It costs US$49.99 as a one-time purchase from its website or the Mac App Store.
-
Pixlr X: Pixlr X is a web-based photo editor that lets you edit your images online without downloading anything. It offers basic tools like cropping, resizing, rotating, color adjustment, filters, effects, and more. You can use Pixlr X for free with ads or upgrade to Pixlr Pro for US$4.90/mo or US$29.99/yr.
-
-
Conclusion
-
Adobe Photoshop for Mac
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Aligarh Mp3 Hindi Songs Free LINK Download.md b/spaces/stomexserde/gpt4-ui/Examples/Aligarh Mp3 Hindi Songs Free LINK Download.md
deleted file mode 100644
index ca5ed50e9594a6b2e66defaa74d51a7612e83c86..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Aligarh Mp3 Hindi Songs Free LINK Download.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-```html
-
Aligarh Mp3 Hindi Songs Free Download: How to Enjoy the Music of the Critically Acclaimed Movie
-
Aligarh is a 2016 Hindi movie that tells the true story of Ramchandra Siras, a professor who faced discrimination and harassment for his sexual orientation. The movie features powerful performances by Manoj Bajpayee and Rajkummar Rao, as well as a soulful soundtrack composed by Karan Kulkarni. If you are looking for Aligarh Mp3 Hindi songs free download, you have come to the right place. In this article, we will show you how to download and listen to the songs of Aligarh legally and safely.
-
Why You Should Listen to Aligarh Songs
-
Aligarh songs are not only melodious and soothing, but also convey the emotions and themes of the movie. The songs reflect the loneliness, pain, and courage of Siras, as well as his love for poetry and music. The songs also capture the essence of Aligarh city, its culture, and its history. Some of the songs that you should not miss are:
Aaina: This is the opening song of the movie, sung by Sonu Nigam. It is a ghazal that expresses the longing and despair of Siras, who feels like a mirror that reflects others but has no identity of his own.
-
Kuch To Hai: This is a romantic song sung by Armaan Malik. It is a duet between Siras and his lover, who share a moment of intimacy and happiness in a hotel room.
-
Suraj Hai Chanda Hai: This is a folk song sung by Pushplata. It is a song that Siras sings to his students in his class, teaching them about the beauty and diversity of nature.
-
Jeene De Mujhe: This is the closing song of the movie, sung by Papon. It is a song that celebrates the freedom and dignity of Siras, who decides to live his life on his own terms.
-
-
How to Download Aligarh Songs for Free
-
If you want to download Aligarh songs for free, you have two options: streaming or downloading. Streaming means that you can listen to the songs online without saving them on your device. Downloading means that you can save the songs on your device and listen to them offline. Here are some of the best platforms where you can find Aligarh songs for free:
-
-
Gaana: This is one of the most popular music streaming apps in India. You can listen to Aligarh songs for free on Gaana, as well as create your own playlists and share them with your friends. You can also download the songs for offline listening if you subscribe to Gaana Plus.
-
JioSaavn: This is another popular music streaming app in India. You can listen to Aligarh songs for free on JioSaavn, as well as access other features such as lyrics, radio, podcasts, and recommendations. You can also download the songs for offline listening if you subscribe to JioSaavn Pro.
-
YouTube: This is the most widely used video platform in the world. You can watch and listen to Aligarh songs for free on YouTube, as well as enjoy other content such as trailers, interviews, reviews, and behind-the-scenes. You can also download the songs for offline viewing if you subscribe to YouTube Premium.
-
SoundCloud: This is one of the most popular music sharing platforms in the world. You can listen to Aligarh songs for free on SoundCloud.
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Autodesk Inventor 2015 Free Download Full Versioninstmankl FREE.md b/spaces/stomexserde/gpt4-ui/Examples/Autodesk Inventor 2015 Free Download Full Versioninstmankl FREE.md
deleted file mode 100644
index 3e92465f5b545f7fa1d28b5080007791ecb90fa4..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Autodesk Inventor 2015 Free Download Full Versioninstmankl FREE.md
+++ /dev/null
@@ -1,82 +0,0 @@
-
-
Autodesk Inventor 2015 Free Download Full Versioninstmankl
-
Autodesk Inventor 2015 is a professional-level CAD software that allows you to create 3D mechanical designs, simulations, visualizations, and documentation. It is one of the most popular and widely used CAD applications in the engineering and manufacturing industries. In this article, you will learn how to download, install, and use Autodesk Inventor 2015 for free, as well as some of the features and benefits of this powerful software.
-
How to download Autodesk Inventor 2015
There are several ways to download Autodesk Inventor 2015 for free, depending on your eligibility and preference. Here are some of the options:
-
-
If you are a flex user or a customer with an active subscription, you can download previous versions of Autodesk software from your Autodesk Account. You can choose between a simple download or a custom install that allows you to configure the installer before you download and install.
-
If you are an education customer, such as a student or an educator, you can download previous versions of Autodesk software from the Education community. You will need to create an account and verify your academic status before you can access the software.
-
If you are not eligible for any of the above options, you can still download previous versions of Autodesk software from the Autodesk Virtual Agent. This is an online chatbot that can help you find the download links for various products. You will need to enter the product name and version number (such as "Inventor 2015") in the chat window and follow the instructions.
-
-
Regardless of which option you choose, you will need a valid serial number and product key to activate the software. You can find these information in your Autodesk Account or in the email confirmation that you received when you purchased or registered the software.
-
How to install Autodesk Inventor 2015
-
Before you install Autodesk Inventor 2015, make sure that your system meets the minimum requirements for the software. Here are some of the system requirements for Autodesk Inventor 2015:
-
-
Operating system: Windows 7 (32-bit or 64-bit), Windows 8 or Windows 8.1 (64-bit only)
Processor: Intel Pentium 4, AMD Athlon 64 with SSE2 technology, 3 GHz or greater, or AMD dual-core processor with SSE2, 2 GHz or greater recommended, or Intel Xeon E3 or Core i7 or equivalent with SSE2, 3.0 GHz or greater
-
Memory: 8 GB RAM minimum for less than 500 part assemblies (16 GB recommended)
-
Graphics: Microsoft Direct3D 10 capable graphics card or higher (Microsoft Direct3D 11 capable card recommended)
-
Hard disk: 40 GB free disk space for installation (100 GB recommended)
-
Internet connection: Required for download and registration
-
-
To install Autodesk Inventor 2015, follow these steps:
-
-
-
Double-click the downloaded file to start the installation process.
-
On the Autodesk Inventor 2015 Installer window, click Install.
-
On the Select the Products to Install page, choose the products and components that you want to install. You can also click Configure to customize the installation settings.
-
On the Product Information page, enter your serial number and product key. You can also select your preferred language and license type.
-
On the Review the Installation Selections page, review the installation summary and click Install to begin the installation.
-
Wait for the installation to complete. You can monitor the progress on the Installation Status page.
-
When the installation is finished, click Finish to exit the installer.
-
-
You can now launch Autodesk Inventor 2015 from your desktop or start menu. You may need to activate the software online or offline before you can use it.
-
How to use Autodesk Inventor 2015
-
Autodesk Inventor 2015 is a comprehensive CAD software that offers a variety of tools and features for creating and modifying 3D mechanical designs. In this section, we will cover some of the basic and advanced modeling techniques that you can use with Autodesk Inventor 2015.
-
Basic modeling
-
The basic modeling process in Autodesk Inventor 2015 involves creating a base sketch, a base feature, and modifying the model using parametric tools. Here are some of the steps to follow:
-
-
Create a new part file by clicking File > New > Standard (in).ipt.
-
Create a base sketch by clicking Sketch > Create 2D Sketch and selecting a plane to sketch on.
-
Use the sketch tools, such as Line, Circle, Rectangle, Arc, Spline, etc., to draw the profile of your model. You can also use constraints and dimensions to control the shape and size of your sketch.
-
Finish the sketch by clicking Finish Sketch on the ribbon or right-clicking and selecting Finish Sketch.
-
Create a base feature by clicking Model > Extrude and selecting your sketch. You can specify the distance and direction of the extrusion, as well as other options such as taper angle, draft angle, operation type, etc.
-
Modify the model by adding more features, such as fillets, chamfers, holes, shells, patterns, etc., using the tools on the Model tab. You can also edit existing features by double-clicking them in the browser or right-clicking and selecting Edit Feature.
-
-
You can use the View tools, such as Zoom, Pan, Rotate, etc., to change your perspective of the model. You can also use the browser to manage your sketches, features, parameters, constraints, etc.
-
Advanced modeling
-
If you want to create more complex shapes and components with Autodesk Inventor 2015, you can use some of the advanced modeling tools that are available on the Model tab. Here are some of them:
-
-
Direct edit: This tool allows you to modify your model by moving, rotating, scaling, deleting, or replacing faces or edges without affecting the feature history. You can access this tool by clicking Model > Direct Edit.
-
Freeform: This tool allows you to create organic shapes by manipulating points, edges, faces, or T-splines on a freeform body. You can access this tool by clicking Model > Freeform.
-
Surface: This tool allows you to create surface models by using various surface creation and editing tools, such as loft, sweep, boundary patch, trim, extend, stitch, etc. You can access this tool by clicking Model > Surface.
-
-
You can use these tools in combination with each other or with the basic modeling tools to create more sophisticated and realistic models with Autodesk Inventor 2015.
-
Sheet metal and frame design
-
Visualization and documentation
-
The final step of using Autodesk Inventor 2015 is to create visualization and documentation for your model. This can help you to communicate your design intent, showcase your work, and generate the necessary information for fabrication and assembly. Here are some of the visualization and documentation tools that you can use with Autodesk Inventor 2015:
-
-
Inventor Studio: This tool allows you to create realistic renderings, animations, and illustrations of your model using various lighting, material, camera, and scene settings. You can access this tool by clicking Environments > Inventor Studio.
-
Documentation: This tool allows you to create drawings, bills of materials, and reports of your model using various annotation, dimension, table, and style settings. You can access this tool by clicking File > New > Standard (in).idw or File > New > Standard (in).dwg.
-
-
For each type of visualization and documentation, you can export the output to various formats, such as JPEG, PNG, AVI, PDF, etc., for further use or sharing.
-
Conclusion
-
Autodesk Inventor 2015 is a powerful CAD software that can help you to create 3D mechanical designs, simulations, visualizations, and documentation. It offers a variety of tools and features for different types of modeling, such as basic, advanced, sheet metal, frame design, etc. It also allows you to perform simulation and analysis on your model, such as motion, stress, and thermal analysis. Finally, it enables you to create visualization and documentation for your model, such as renderings, animations, drawings, bills of materials, etc.
-
By using Autodesk Inventor 2015, you can improve your design productivity, quality, and efficiency. You can also save time and money by reducing errors and rework. Moreover, you can enhance your creativity and innovation by exploring different design possibilities and scenarios.
-
If you want to learn more about Autodesk Inventor 2015 or download it for free, you can visit the official website or the links provided in this article. You can also check out some of the online tutorials and resources that are available for Autodesk Inventor 2015 users.
-
FAQs
-
Here are some of the frequently asked questions and answers about Autodesk Inventor 2015:
-
-
What is the difference between Autodesk Inventor 2015 and Autodesk Inventor Professional 2015?
-Autodesk Inventor Professional 2015 is an enhanced version of Autodesk Inventor 2015 that includes additional tools and features for advanced simulation and analysis, such as dynamic simulation, finite element analysis (FEA), routed systems design (such as tube and pipe), cable and harness design, etc.
-
How can I update my Autodesk Inventor 2015 to the latest service pack or hotfix?
-You can update your Autodesk Inventor 2015 by downloading and installing the latest service pack or hotfix from the Autodesk Knowledge Network. You can also use the Autodesk Application Manager or the Autodesk Desktop App to check for updates automatically.
-
How can I get help or support for Autodesk Inventor 2015?
-You can get help or support for Autodesk Inventor 2015 by using the built-in help system or the online help portal. You can also contact the Autodesk support team or the Autodesk community forums for technical assistance or feedback.
-
How can I customize or extend the functionality of Autodesk Inventor 2015?
-You can customize or extend the functionality of Autodesk Inventor 2015 with the tools and options built into the software. For example, you can use the Application Options dialog box to change its general settings and preferences, and the Customize User Interface dialog box to modify the ribbon, toolbars, menus, keyboard shortcuts, and more. Moreover, you can use the API (Application Programming Interface) or the SDK (Software Development Kit) to create custom commands, add-ins, and macros that enhance or automate your workflow with Autodesk Inventor 2015; a short illustrative sketch follows this FAQ list.
-
How can I share or collaborate on my Autodesk Inventor 2015 models with others?
-You can share or collaborate on your Autodesk Inventor 2015 models with others by using various tools and services that are available in the software or online. For example, you can use the Pack and Go tool to create a zip file that contains all the files and references of your model. You can also use the Export tool to save your model in different formats, such as STEP, IGES, STL, etc., that can be opened by other CAD applications. Moreover, you can use the Autodesk 360 service to upload, store, view, and share your model online with other users.
-
-
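-To give a concrete feel for the API route mentioned above, here is a minimal sketch (my illustration, not an official Autodesk sample) that talks to a running Inventor session through its COM interface. It assumes Windows, an installed copy of Inventor, and the pywin32 package; the "Inventor.Application" ProgID and the Documents/ActiveDocument properties are standard parts of the Inventor automation API, but treat the snippet as a starting point rather than production code.
-
-```python
-# Minimal sketch: inspect a running Autodesk Inventor session via COM (requires pywin32 on Windows).
-import win32com.client
-
-# Attach to an Inventor instance that is already running.
-inv = win32com.client.GetActiveObject("Inventor.Application")
-inv.Visible = True
-
-# Report how many documents are open and which one is active.
-print("Open documents:", inv.Documents.Count)
-if inv.Documents.Count > 0:
-    print("Active document:", inv.ActiveDocument.DisplayName)
-```
-
-From here, add-ins and macros typically register commands or react to application events, but the exact plumbing is best taken from the SDK samples that ship with the product.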
I hope this article has helped you to learn more about Autodesk Inventor 2015 and how to download, install, and use it for free. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy designing!
b2dd77e56b
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Dire Straits Sultans Of Swing Limited Edition Cd1 Full Album Zip Free.md b/spaces/stomexserde/gpt4-ui/Examples/Dire Straits Sultans Of Swing Limited Edition Cd1 Full Album Zip Free.md
deleted file mode 100644
index 2021dc0b14896d474022ea1445e62ea0936e0ca9..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Dire Straits Sultans Of Swing Limited Edition Cd1 Full Album Zip Free.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
Dire Straits Sultans Of Swing: A Classic Rock Album Revisited
-
Dire Straits was a British rock band formed in 1977 by Mark Knopfler, David Knopfler, John Illsley and Pick Withers. They are best known for their hit songs such as "Money for Nothing", "Walk of Life", "Brothers in Arms" and "Sultans of Swing". The latter was their breakthrough single from their debut album, Dire Straits, released in 1978. It showcases Mark Knopfler's distinctive guitar style and storytelling lyrics, inspired by jazz musicians he saw playing in a pub in London.
-
Sultans of Swing has been praised by critics and fans alike as one of the greatest rock songs of all time. It has been covered by many artists, such as Eric Clapton, Sting, Metallica and The Killers. It has also been included in several lists of the best songs ever, such as Rolling Stone's 500 Greatest Songs of All Time, VH1's 100 Greatest Songs of Rock & Roll and Q Magazine's 1001 Best Songs Ever.
-
Dire Straits Sultans Of Swing Limited Edition Cd1 Full Album Zip
If you are a fan of Dire Straits and Sultans of Swing, you might be interested in a limited edition CD1 full album zip that contains the original version of the song, along with live versions, remixes and other rare tracks. This CD1 is part of a collection of five CDs that also features other Dire Straits albums, such as On Every Street, On The Night and Money For Nothing. You can download this CD1 zip file from the Internet Archive[^2^], where you can also find other Dire Straits music and videos for free.
-
Dire Straits Sultans Of Swing Limited Edition CD1 Full Album Zip is a great way to enjoy this classic rock song and discover more about the band that created it. Whether you want to listen to it on your computer, your phone or your CD player, you will be amazed by the quality and variety of this collection. Don't miss this opportunity to relive the glory days of Dire Straits and Sultans of Swing!
-
-
The History of Dire Straits
-
Dire Straits were formed in London in 1977 by Mark Knopfler, a former journalist and college teacher who had honed his guitar skills as part of the pub rock scene. He recruited his younger brother David Knopfler on rhythm guitar, John Illsley on bass and Pick Withers on drums. The band took their name from a phrase that described their financial situation at the time. They started playing gigs in pubs and clubs, and recorded a demo tape that included "Sultans of Swing".
-
The demo tape caught the attention of DJ Charlie Gillett, who played it on his radio show. This led to a recording contract with Vertigo Records, and the release of their self-titled debut album in 1978. The album was well received by critics and fans, and "Sultans of Swing" became a hit single on both sides of the Atlantic. The song showcased Mark Knopfler's distinctive fingerstyle guitar playing and storytelling lyrics, inspired by jazz musicians he saw playing in a pub in London.
e93f5a0c3f
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Download Nx 9.0 Full Crack.md b/spaces/stomexserde/gpt4-ui/Examples/Download Nx 9.0 Full Crack.md
deleted file mode 100644
index 10b42621296de636a704bc02501e5706d2a9d893..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Download Nx 9.0 Full Crack.md
+++ /dev/null
@@ -1,40 +0,0 @@
-
-
-
How to Download Nx 9.0 Full Crack for Free
-
Nx 9.0 is a powerful CAD/CAM/CAE software that combines the best features of Unigraphics and Ideas. It is widely used in various industries such as industrial design, mechanical engineering, mold design, CNC programming, CAE analysis and more. Nx 9.0 was released by Siemens Industry Software in 2013 with many improvements and new features that help you design better products faster and cheaper.
If you want to download Nx 9.0 full crack for free, you have come to the right place. In this article, we will show you how to download Nx 9.0 full crack from reliable sources and how to install it on your computer step by step.
-
Why Download Nx 9.0 Full Crack?
-
Nx 9.0 full crack is a great option for those who want to use Nx 9.0 without paying for a license. With Nx 9.0 full crack, you can enjoy all the benefits of Nx 9.0 such as:
-
-
Synchronous technology 2D: This feature allows you to edit 2D data faster and easier than ever before.
-
Synchronous technology 3D: This feature enables you to work with 3D models in a flexible and intuitive way.
-
Realize Shape: This feature lets you create complex shape systems with ease and efficiency.
-
Ray Traced Studio: This feature gives you a high-quality image of your design in real time, allowing you to make design decisions earlier.
-
-
By downloading Nx 9.0 full crack, you can save money and time while creating amazing products with Nx 9.0.
-
How to Download Nx 9.0 Full Crack?
-
To download Nx 9.0 full crack, you need to follow these steps:
-
-
Download the installation files of Nx 9.0 from one of these links[^1^] [^2^] [^3^]. These links are torrent files, which means you need a torrent client such as uTorrent to download them.
-
Extract the installation files from the zip file that you downloaded.
-
Double-click on the torrent file that corresponds to your operating system (64-bit).
-
Choose OK to start downloading Nx 9.0 full crack.
-
Wait until the download is complete.
-
-
How to Install Nx 9.0 Full Crack?
-
To install Nx 9.0 full crack, you need to follow these steps:
-
-
Install Java on your computer if you don't have it already.
-
Run the setup.exe file from the installation folder of Nx 9.0.
-
Follow the instructions on the screen to install Nx 9.0 on your computer.
-
Copy the contents of the crack folder to the installation folder of Nx 9.0 and replace the original files.
-
Copy the license file (SPLM_LICENSE_SERVER.dat) to C:\Program Files\Siemens\PLMLicenseServer.
-
Edit the environment variable UGS_LICENSE_SERVER with the value: [email protected]
-
Restart your computer.
-
-
Congratulations! You have successfully installed Nx 9.0 full crack on your computer. You can now launch Nx 9.0 and enjoy its features.
- 7196e7f11a
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/HD Online Player (kastor All Video Downloader Premium ).md b/spaces/stomexserde/gpt4-ui/Examples/HD Online Player (kastor All Video Downloader Premium ).md
deleted file mode 100644
index 872151110b74bf86efa2b5360c82c15f999b8369..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/HD Online Player (kastor All Video Downloader Premium ).md
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
How to Download and Watch Videos Online with Kastor All Video Downloader Premium
-
If you are looking for a powerful and easy-to-use software that can download videos from various websites and let you watch them offline, you might want to try Kastor All Video Downloader Premium. This software is a freeware that allows you to download videos from YouTube, Facebook, Instagram, Vimeo, Dailymotion, and many more in different formats and qualities. You can also use it to search and browse videos directly from the program, preview them before downloading, and watch them from the built-in player.
-
HD Online Player (kastor all video downloader premium )
In this article, we will show you how to use Kastor All Video Downloader Premium to download and watch videos online.
-
Step 1: Download and install Kastor All Video Downloader Premium
-
You can download Kastor All Video Downloader Premium from its official website: https://www.kastorsoft.com/allvideodownloader.php. The software is compatible with Windows XP, Vista, 7, 8, and 10. The installation process is simple and fast. Just follow the instructions on the screen and agree to the terms and conditions.
-
Step 2: Copy and paste the video URL
-
Once you have installed the software, launch it and go to the video you want to download from your web browser. Copy the video URL from the address bar and paste it into the "Video URL" box in Kastor All Video Downloader Premium. Alternatively, you can use the built-in search engine to find videos by keywords or categories.
-
Step 3: Choose the output format and quality
-
After pasting the video URL, click on the "Download" button. A new window will pop up where you can choose the output format and quality for your video. You can select from various options such as AVI, WMV, MP3, FLV, MP4, AAC, and more. You can also choose the resolution of your video from HD, FHD, Ultra HD, 1080p, 2K, 4K, 8K, HDR, etc. You can also adjust other settings such as bitrate, frame rate, audio quality, etc.
-
Step 4: Start downloading and watching your video
-
Once you have chosen your desired output format and quality, click on the "OK" button to start downloading your video. You can see the progress of your download in the main window of Kastor All Video Downloader Premium. You can also pause or resume your download at any time. Once your download is complete, you can watch your video from the built-in player by clicking on the "Play" button. You can also find your downloaded videos in the output folder that you specified.
-
Conclusion
-
Kastor All Video Downloader Premium is a great software that can help you download and watch videos online from various websites. It is free, fast, reliable, and easy to use. You can download videos in different formats and qualities according to your preferences. You can also search and browse videos directly from the program, preview them before downloading, and watch them from the built-in player. If you are looking for a way to enjoy online videos offline, you should give Kastor All Video Downloader Premium a try.
-
- 7b8c122e87
-
-
\ No newline at end of file
diff --git a/spaces/stomexserde/gpt4-ui/Examples/Inna Party Never Ends Deluxe Edition Itunes 20 [PORTABLE].md b/spaces/stomexserde/gpt4-ui/Examples/Inna Party Never Ends Deluxe Edition Itunes 20 [PORTABLE].md
deleted file mode 100644
index 4fdbe94fa77e34d7d26efdac53716885f301fd14..0000000000000000000000000000000000000000
--- a/spaces/stomexserde/gpt4-ui/Examples/Inna Party Never Ends Deluxe Edition Itunes 20 [PORTABLE].md
+++ /dev/null
@@ -1,30 +0,0 @@
-
-
Why You Should Buy Inna's Party Never Ends Deluxe Edition on iTunes
-
If you are a fan of dance-pop music, you probably know who Inna is. She is a Romanian singer and songwriter who has been making waves in the international music scene since 2008. She is known for her catchy songs, energetic performances, and stunning looks.
But did you know that she has released a deluxe edition of her third studio album, Party Never Ends, on iTunes? This edition contains 23 songs, including some exclusive tracks and collaborations with artists like Daddy Yankee, Pitbull, J Balvin, and Reik. It also features some of her biggest hits, such as "In Your Eyes", "Cola Song", "Good Time", and "More Than Friends".
-
Party Never Ends is an album that lives up to its name. It is full of upbeat, danceable tunes that will make you want to move your body and have fun. Inna showcases her versatility and talent as a singer, as she switches from English to Spanish to Romanian in some songs. She also experiments with different genres and influences, such as reggaeton, electro-pop, house, and Balkan music.
-
If you buy the deluxe edition of Party Never Ends on iTunes, you will not only get access to all these amazing songs, but also enjoy some benefits such as:
-
-
-
High-quality audio files that you can download and listen to offline.
-
The ability to sync your music library across your devices with iCloud Music Library.
-
The option to create playlists and share them with your friends.
-
The chance to support Inna and her music career.
-
-
So what are you waiting for? Head over to iTunes and buy Inna's Party Never Ends Deluxe Edition today. You will not regret it!
-
-
If you are still not convinced, here are some reviews from critics and fans who have listened to Party Never Ends Deluxe Edition:
-
-
"Party Never Ends is a non-stop celebration of life, love, and music. Inna delivers a flawless performance that will make you dance, sing, and smile. This is one of the best pop albums of the year." - PopCrush
-
-
-
"Inna proves that she is more than just a pretty face. She is a talented and versatile artist who can adapt to any style and genre. Party Never Ends is a diverse and dynamic album that showcases her range and charisma. It is a must-have for any dance music lover." - EDM.com
-
-
-
"I love this album so much! It has everything I need to have a good time. Inna's voice is amazing and her songs are catchy and fun. She is my favorite singer ever!" - iTunes user
-
-
As you can see, Party Never Ends Deluxe Edition is a great album that deserves your attention and support. It is available on iTunes for only $9.99, which is a bargain for such a quality product. Don't miss this opportunity to enjoy some of the best dance-pop music ever made by one of the most talented and beautiful singers in the world.
-
Buy Inna's Party Never Ends Deluxe Edition on iTunes now and join the party!
81aa517590
-
-
\ No newline at end of file
diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_search_engine_meilisearch.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_search_engine_meilisearch.py
deleted file mode 100644
index 8d2bb64942f521af45edf60df2c4e6e9d9d36fab..0000000000000000000000000000000000000000
--- a/spaces/sub314xxl/MetaGPT/tests/metagpt/tools/test_search_engine_meilisearch.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-@Time : 2023/5/27 22:18
-@Author : alexanderwu
-@File : test_search_engine_meilisearch.py
-"""
-import subprocess
-import time
-
-import pytest
-
-from metagpt.logs import logger
-from metagpt.tools.search_engine_meilisearch import DataSource, MeilisearchEngine
-
-MASTER_KEY = '116Qavl2qpCYNEJNv5-e0RC9kncev1nr1gt7ybEGVLk'
-
-
-@pytest.fixture()
-def search_engine_server():
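- # Start a local Meilisearch server for the duration of the test; the sleep gives it time to come up before queries run.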
- meilisearch_process = subprocess.Popen(["meilisearch", "--master-key", f"{MASTER_KEY}"], stdout=subprocess.PIPE)
- time.sleep(3)
- yield
- meilisearch_process.terminate()
- meilisearch_process.wait()
-
-
-def test_meilisearch(search_engine_server):
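- # Index a small set of sample documents and verify that a title search returns results.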
- search_engine = MeilisearchEngine(url="http://localhost:7700", token=MASTER_KEY)
-
- # Assume a data source named "books" to which the documents will be added
- books_data_source = DataSource(name='books', url='https://example.com/books')
-
- # Assume a collection named "documents" containing the documents to add
- documents = [
- {"id": 1, "title": "Book 1", "content": "This is the content of Book 1."},
- {"id": 2, "title": "Book 2", "content": "This is the content of Book 2."},
- {"id": 3, "title": "Book 1", "content": "This is the content of Book 1."},
- {"id": 4, "title": "Book 2", "content": "This is the content of Book 2."},
- {"id": 5, "title": "Book 1", "content": "This is the content of Book 1."},
- {"id": 6, "title": "Book 2", "content": "This is the content of Book 2."},
- ]
-
- # Add the documents to the search engine
- search_engine.add_documents(books_data_source, documents)
- logger.info(search_engine.search('Book 1'))
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/TechSmith Snagit 2018.0.2 Build 662 VERIFIED Crack [VERIFIED CracksNow] Serial Key.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/TechSmith Snagit 2018.0.2 Build 662 VERIFIED Crack [VERIFIED CracksNow] Serial Key.md
deleted file mode 100644
index e740ee59592851795cd172ec8e7feb1dcb23a014..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/TechSmith Snagit 2018.0.2 Build 662 VERIFIED Crack [VERIFIED CracksNow] Serial Key.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
TechSmith Snagit 2018.0.2 Build 662 Crack [CracksNow] Serial Key
-
-F4CG [TRTKR] "Team Rocket Fan" on Tuesday, September 16, 2013 9:42 AM It seems that Team Rocket had nothing to do with the P2P Emulator or other tools. Your copyright violation claims are bogus. The MAC app license said it was for the trial versions, but all versions sold were the full versions, so they are infringing. Mac App Store will not buy apps from pirates, so you just get sent to the Mac App Store page and pay them to allow you to sell it there, and that is only if you get lucky. You may ask that the app be removed (which in fact, will happen) or you may use the licenses they have you use and in both cases, they are just exploiting you as a P2P distributer. And you, Mr. Dislaimer, are a hypocritical moron. The P2P Emulator "Team Rocket Fan" stopped working when Steam came out and some program hit the Mac App Store for games. This is not uncommon for developers who release their games in the Mac App Store. And you don't think that's illegal, because you're too stupid.
-
-The copy of the Mac App Store approved app license is right there in the license. You can read it. I made it.
-
-You're allowed to make this app for a period of time, and then you have to get a new license.
-
-You don't have to go through the Mac App Store to buy the app.
-
-You can easily get an older version.
-
-To use it with PC, you just need a PC app license.
-
-It's in the docs. Not new.
-
-It's not a new problem. It's old. It's been going on for years. You claim that P2P is illegal? Really? That's your argument? If you want to argue it, make it legal. I can provide you with a link to a law in Australia that allows this. You can find a link to that in the links I've given you. Here's what I've shown you: Here's the link to Australia's Law Society's webpage on P2P software: We're using the definition of P2P here that you tried to use. The definition of P2P in the AU law is:
-
-- You can make multiple copies of the software for yourself
-
-- If you make multiple copies for yourself, you may pass them on
-
-- If you pass 4fefd39f24
-
-
-
diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Tum Bin 2 Movie Free [WORK] Download In Hindi Mp4.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Tum Bin 2 Movie Free [WORK] Download In Hindi Mp4.md
deleted file mode 100644
index 36419f4324e94296178d120e2a54a04aeff18a68..0000000000000000000000000000000000000000
--- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Tum Bin 2 Movie Free [WORK] Download In Hindi Mp4.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-Watch Tum Bin full video song from Tum Bin 2 Movie. Download Tum Bin mp4 video song from Tum Bin 2 Movie to your Hungama account. Download Hungama ... 1fdad05405
-
-
-
diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/127 Hours Full Movie Download !!BETTER!! In Hindi 720p.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/127 Hours Full Movie Download !!BETTER!! In Hindi 720p.md
deleted file mode 100644
index b1919ce9d3293969f8ce2d00bb7bdfa3700cbe70..0000000000000000000000000000000000000000
--- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/127 Hours Full Movie Download !!BETTER!! In Hindi 720p.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- 1fdad05405
-
-
-
diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Artificial Intelligence Full Movie Download In Hindi.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Artificial Intelligence Full Movie Download In Hindi.md
deleted file mode 100644
index d26e481042fea0f434a74399d8835c7c5c8b889b..0000000000000000000000000000000000000000
--- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Artificial Intelligence Full Movie Download In Hindi.md
+++ /dev/null
@@ -1,29 +0,0 @@
-
-
How to Download Artificial Intelligence Full Movie in Hindi for Free
-
-
If you are looking for a way to download Artificial Intelligence full movie in Hindi for free, you have come to the right place. Artificial Intelligence is a 2001 sci-fi drama film directed by Steven Spielberg and based on the short story "Supertoys Last All Summer Long" by Brian Aldiss. The film tells the story of David, a highly advanced robotic boy who longs to become "real" and regain the love of his human mother.
-
Artificial Intelligence Full Movie Download In Hindi
Artificial Intelligence is a masterpiece of filmmaking that explores the themes of love, identity, and humanity in a futuristic world where artificial intelligence has surpassed human intelligence. The film features stunning visual effects, a captivating soundtrack, and brilliant performances by Haley Joel Osment, Jude Law, Frances O'Connor, and William Hurt.
-
-
Artificial Intelligence was nominated for two Oscars and won 17 awards, including the Saturn Award for Best Science Fiction Film. The film has a rating of 7.2/10 on IMDb and 74% on Rotten Tomatoes. It is widely regarded as one of Spielberg's most ambitious and personal films.
-
-
But how can you watch Artificial Intelligence full movie in Hindi for free? There are several websites that claim to offer the movie for free download, but most of them are either illegal or unsafe. You may end up downloading viruses, malware, or unwanted software that can harm your device or compromise your privacy. Moreover, downloading copyrighted content without permission is a violation of the law and can result in legal consequences.
-
-
-
The best way to watch Artificial Intelligence full movie in Hindi for free is to use a legal and safe streaming service that offers the movie with subtitles or dubbing. One such service is MoviesMint (https://moviesmint1.co), which is a free website dedicated to movies and shows lovers who love to watch movies and series. MoviesMint offers Artificial Intelligence full movie in Hindi and English dual audio with 480p and 720p quality options. You can also choose from Google Drive, One Drive, and Mega links for fast and secure downloading.
-
-
To watch Artificial Intelligence full movie in Hindi for free on MoviesMint, follow these simple steps:
-
-
-
Go to https://moviesmint1.co/download-a-i-artificial-intelligence-2001-dual-audio-hindi-english-480p-720p-webrip/
-
Select your preferred quality option (480p or 720p) and click on the download button.
-
You will be redirected to a website where you can watch or download the movie for free.
-
Enjoy the movie!
-
-
-
MoviesMint is a great place to download all kinds of movies, series, and shows. You can also filter the content by year, genre, language, or by choosing what's popular right now. MoviesMint does not require any login or registration to use its services. The content is totally free and updated regularly.
-
-
So what are you waiting for? Watch Artificial Intelligence full movie in Hindi for free on MoviesMint today and let us know what you think!
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Autodesk Inventor Pro 2014 64bits Download Pc.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Autodesk Inventor Pro 2014 64bits Download Pc.md
deleted file mode 100644
index 21c1c7b6b30768c797c62bb4c999e216a2aa6f1c..0000000000000000000000000000000000000000
--- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/logs/Autodesk Inventor Pro 2014 64bits Download Pc.md
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-October 18, 2562 BE - Autodesk Inventor Pro 2014 (x86/x64) full system requirements: Windows 7/8/8.1/10 (32-bit and 64-bit, all editions); Intel or AMD processor, 2 GHz or faster; 4 GB of RAM or more (8 GB minimum for the 64-bit version); DVD drive; network/internet connection (for download and activation).
-Installing AutoCAD 2014 requires the operating system, browser, and language updates to be installed first.
-Updates for Windows 7/8/8.1/10 (Windows 32-bit and 64-bit - all editions) are available at http://go.microsoft.com/fwlink/?linkid=223167. 8a78ff9644
-
-
-
diff --git a/spaces/t13718236382/bingoGPT4/src/pages/api/proxy.ts b/spaces/t13718236382/bingoGPT4/src/pages/api/proxy.ts
deleted file mode 100644
index 240b5fb5561d993c6381649bf4544ce12f3cdab2..0000000000000000000000000000000000000000
--- a/spaces/t13718236382/bingoGPT4/src/pages/api/proxy.ts
+++ /dev/null
@@ -1,24 +0,0 @@
-'use server'
-
-import { NextApiRequest, NextApiResponse } from 'next'
-import { fetch } from '@/lib/isomorphic'
-
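-// Pass-through proxy endpoint: forwards the url, headers, method and body supplied in the request body, then relays the upstream response text while exposing the final URL and status via the x-url and x-status headers.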
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- try {
- const { url, headers, method = 'GET', body } = req.body
- if (!url) {
- return res.end('ok')
- }
- const response = await fetch(url, { headers, method, body, redirect: 'manual' })
- const text = await response.text()
- res.writeHead(200, {
- 'Content-Type': 'text/plain',
- 'x-url': response.url,
- 'x-status': response.status,
- })
- res.end(text)
- } catch (e) {
- console.error(e)
- // res.end expects a string or Buffer, so serialize the error instead of passing the object through
- return res.end(e instanceof Error ? e.message : String(e))
- }
-}
diff --git a/spaces/tanishqvashisht/comicInator/dataset.py b/spaces/tanishqvashisht/comicInator/dataset.py
deleted file mode 100644
index b61fa5524da52fef0c675376e5a7da1c7a41f9f9..0000000000000000000000000000000000000000
--- a/spaces/tanishqvashisht/comicInator/dataset.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import numpy as np
-import config
-import os
-from PIL import Image
-from torch.utils.data import Dataset, DataLoader
-from torchvision.utils import save_image
-
-
-class MapDataset(Dataset):
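- # Pairs the i-th face image with the i-th comic image from two directories, applying a shared augmentation to the pair before separate input/target transforms.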
- def __init__(self, root_dir_comics, root_dir_face):
- self.root_dir_face = root_dir_face
- self.list_files_face = os.listdir(self.root_dir_face)
- self.root_dir_comics = root_dir_comics
- self.list_files_comics = os.listdir(self.root_dir_comics)
-
- def __len__(self):
- return len(self.list_files_face)
-
- def __getitem__(self, index):
- img_file_input = self.list_files_face[index]
- img_file_target = self.list_files_comics[index]
- img_path_input = os.path.join(self.root_dir_face, img_file_input)
- img_path_target = os.path.join(self.root_dir_comics, img_file_target)
- image_input = np.array(Image.open(img_path_input))
- image_target = np.array(Image.open(img_path_target))
- input_image = image_input
- target_image = image_target
-
- augmentations = config.both_transform(image=input_image, image0=target_image)
- input_image = augmentations["image"]
- target_image = augmentations["image0"]
-
- input_image = config.transform_only_input(image=input_image)["image"]
- target_image = config.transform_only_mask(image=target_image)["image"]
-
- return input_image, target_image
-
-
-if __name__ == "__main__":
- # MapDataset needs separate comic and face directories (the paths below are illustrative placeholders)
- dataset = MapDataset("data/train/comics", "data/train/faces")
- loader = DataLoader(dataset, batch_size=5)
- for x, y in loader:
- print(x.shape)
- save_image(x, "x.png")
- save_image(y, "y.png")
- import sys
-
- sys.exit()
\ No newline at end of file
diff --git a/spaces/tanishqvashisht/sharingan/model_main.py b/spaces/tanishqvashisht/sharingan/model_main.py
deleted file mode 100644
index 0e88cb3e47994da534fee0cb32ea192bf19a1bcd..0000000000000000000000000000000000000000
--- a/spaces/tanishqvashisht/sharingan/model_main.py
+++ /dev/null
@@ -1,183 +0,0 @@
-import os
-import pickle
-import re
-import numpy as np
-from tqdm.notebook import tqdm
-
-from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
-from tensorflow.keras.preprocessing.image import load_img, img_to_array
-from tensorflow.keras.preprocessing.text import Tokenizer
-from tensorflow.keras.preprocessing.sequence import pad_sequences
-from tensorflow.keras.models import Model
-from tensorflow.keras.utils import to_categorical, plot_model
-from tensorflow.keras.layers import Input, Dense, LSTM, Embedding, Dropout, add
-
-# load vgg16 model
-model = VGG16()
-# restructure the model
-model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
-
-with open('features.pkl', 'rb') as f:
- features = pickle.load(f)
-
-with open('captions.txt', 'r') as f:
- next(f)
- captions_doc = f.read()
-
-# # create mapping of image to captions
-mapping = {}
-# process lines
-for line in captions_doc.split('\n'):
- # split the line by comma(,)
- tokens = line.split(',')
- if len(line) < 2:
- continue
- image_id, caption = tokens[0], tokens[1:]
- # remove extension from image ID
- image_id = image_id.split('.')[0]
- # convert caption list to string
- caption = " ".join(caption)
- # create list if needed
- if image_id not in mapping:
- mapping[image_id] = []
- # store the caption
- mapping[image_id].append(caption)
-
-def clean(mapping):
- for key, captions in mapping.items():
- for i in range(len(captions)):
- # take one caption at a time
- caption = captions[i]
- # preprocessing steps
- # convert to lowercase
- caption = caption.lower()
- # delete digits and special chars (str.replace treats the pattern literally, so use a regex substitution instead)
- caption = re.sub(r'[^A-Za-z ]', '', caption)
- # collapse repeated whitespace into single spaces
- caption = re.sub(r'\s+', ' ', caption)
- # add start and end tags to the caption
- caption = 'startseq ' + " ".join([word for word in caption.split() if len(word)>1]) + ' endseq'
- captions[i] = caption
-
-clean(mapping)
-
-all_captions = []
-for key in mapping:
- for caption in mapping[key]:
- all_captions.append(caption)
-
-# tokenize the text
-tokenizer = Tokenizer()
-tokenizer.fit_on_texts(all_captions)
-vocab_size = len(tokenizer.word_index) + 1
-
-# get maximum length of the caption available
-max_length = max(len(caption.split()) for caption in all_captions)
-
-
-# create data generator to get data in batch (avoids session crash)
-def data_generator(data_keys, mapping, features, tokenizer, max_length, vocab_size, batch_size):
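- # Yields ([image_features, padded_caption_prefixes], one-hot next words), accumulating every prefix/next-word pair for batch_size images before each yield.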
- # loop over images
- X1, X2, y = list(), list(), list()
- n = 0
- while 1:
- for key in data_keys:
- n += 1
- captions = mapping[key]
- # process each caption
- for caption in captions:
- # encode the sequence
- seq = tokenizer.texts_to_sequences([caption])[0]
- # split the sequence into X, y pairs
- for i in range(1, len(seq)):
- # split into input and output pairs
- in_seq, out_seq = seq[:i], seq[i]
- # pad input sequence
- in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
- # encode output sequence
- out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
-
- # store the sequences
- X1.append(features[key][0])
- X2.append(in_seq)
- y.append(out_seq)
- if n == batch_size:
- X1, X2, y = np.array(X1), np.array(X2), np.array(y)
- yield [X1, X2], y
- X1, X2, y = list(), list(), list()
- n = 0
-
-# encoder model
-# image feature layers
-inputs1 = Input(shape=(4096,))
-fe1 = Dropout(0.4)(inputs1)
-fe2 = Dense(256, activation='relu')(fe1)
-# sequence feature layers
-inputs2 = Input(shape=(max_length,))
-se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
-se2 = Dropout(0.4)(se1)
-se3 = LSTM(256)(se2)
-
-# decoder model
-decoder1 = add([fe2, se3])
-decoder2 = Dense(256, activation='relu')(decoder1)
-outputs = Dense(vocab_size, activation='softmax')(decoder2)
-
-model = Model(inputs=[inputs1, inputs2], outputs=outputs)
-model.compile(loss='categorical_crossentropy', optimizer='adam')
-
-
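-# Discard the freshly built (untrained) model above and load the trained weights instead (assumes best_model.h5 is available next to this script).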
-from keras.models import load_model
-model = load_model("best_model.h5")
-
-def idx_to_word(integer, tokenizer):
- for word, index in tokenizer.word_index.items():
- if index == integer:
- return word
- return None
-
-
-# generate caption for an image
-def predict_caption(model, image, tokenizer, max_length):
- # add start tag for generation process
- in_text = 'startseq'
- # iterate over the max length of sequence
- for i in range(max_length):
- # encode input sequence
- sequence = tokenizer.texts_to_sequences([in_text])[0]
- # pad the sequence
- sequence = pad_sequences([sequence], max_length)
- # predict next word
- yhat = model.predict([image, sequence], verbose=0)
- # get index with high probability
- yhat = np.argmax(yhat)
- # convert index to word
- word = idx_to_word(yhat, tokenizer)
- # stop if word not found
- if word is None:
- break
- # append word as input for generating next word
- in_text += " " + word
- # stop if we reach end tag
- if word == 'endseq':
- break
-
- return in_text
-
-vgg_model = VGG16()
-# restructure the model
-vgg_model = Model(inputs=vgg_model.inputs, outputs=vgg_model.layers[-2].output)
-
-def generate_caption(image_path):
- image_path = image_path
- # load image
- image = load_img(image_path, target_size=(224, 224))
- # convert image pixels to numpy array
- image = img_to_array(image)
- # reshape data for model
- image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
- # preprocess image for vgg
- image = preprocess_input(image)
- # extract features
- feature = vgg_model.predict(image, verbose=0)
- # predict from the trained model
- return predict_caption(model, feature, tokenizer, max_length)[9: -7]
\ No newline at end of file
diff --git a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Spectre (English) Tamil Movie Hd 108).md b/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Spectre (English) Tamil Movie Hd 108).md
deleted file mode 100644
index 008baf39740633b1af0c12bed3cb36e01c2ad98d..0000000000000000000000000000000000000000
--- a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Spectre (English) Tamil Movie Hd 108).md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
Watch Bollywood and Hollywood Full Movies Online Free Spectre (2015) BluRay English Full Movie Watch Online Free. Hollywood Moviefiles. Politics of E. Military and Intelligence – Russia and India. Languages of India. The hd online player for Spectre (English) tamil movie hd 108 is a very popular film in the comedy genre was released in 2015. The tagline for The hd online player for Spectre (English) tamil movie hd 108 is '. Watch and Download HD Online Player Spectre (English) tamil movie hd 108 Full Movie, Watch. Movie Spectre 2015, Story, Trailers Times of India Movie Spectre Movie Review.. Indian Army and its Air Force possesses a small aircraft which is used for reconnaissance. The stealthy Dhruv Helicopter One is an Indian military helicopters that flies. HD Online Player (Spectre (English) tamil movie hd 108)
Watch Bollywood and Hollywood Full Movies Online Free Spectre (2015) BluRay English Full Movie Watch Online Free. Actress Priyanka Chawla stated that Bachchan owed her because his father loved her voice but she didn’t know that he wasn’t acceptable, furthermore, in the morning when she was cooking food for him the singer would realize that it was the morning wake up call which he would complain at her. Bollywood and Hollywood Full Movies Online Free Spectre (2015) BluRay English Full Movie Watch Online Free. It was a mistake while watching the movie that she was really amazed by the acting of actor Amitabh Bachchan and said that she wanted to follow this actor she wanted to be successful like him in the future. Watch HD Online. A total of 6,500 scientists, engineers and technicians work at Indian Space Research Organization’s Vikram Sarabhai Space Centre located in Thiruvananthapuram, Kerala. Film actor Amitabh Bachchan has a total of six nominations in the 2014 edition of British awards. It is India’s only National Institute of Oceanography. Indian National Congress won six State Assembly seats and BJP two after elections of 2012 Legislative Assembly.
-
HD Online Player (Spectre (English) tamil movie hd 108)
899543212b
-
-
\ No newline at end of file
diff --git a/spaces/thejagstudio/procom/main/migrations/0007_products_link.py b/spaces/thejagstudio/procom/main/migrations/0007_products_link.py
deleted file mode 100644
index 1de12310485c3bd2f99c0b6c984b5d6f6e66aecb..0000000000000000000000000000000000000000
--- a/spaces/thejagstudio/procom/main/migrations/0007_products_link.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Generated by Django 4.1.4 on 2023-04-08 06:06
-
-from django.db import migrations, models
-
-
-class Migration(migrations.Migration):
-
- dependencies = [
- ("main", "0006_categories"),
- ]
-
- operations = [
- migrations.AddField(
- model_name="products",
- name="link",
- field=models.CharField(default="", max_length=500),
- ),
- ]
diff --git a/spaces/themanas021/Image_Caption_Generation/app.py b/spaces/themanas021/Image_Caption_Generation/app.py
deleted file mode 100644
index c7e2b4692003b7a646619bac4e76a2ba9de8b386..0000000000000000000000000000000000000000
--- a/spaces/themanas021/Image_Caption_Generation/app.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import streamlit as st
-from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
-from PIL import Image
-
-# Load the models and tokenizer
-model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-
-# Define generation parameters
-max_length = 20
-num_beams = 7
-gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
-
-# Define the Streamlit app
-def main():
- st.title("Image Captioning App")
- st.write("Upload an image and get a caption!")
-
- uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png", "webp"])
-
- if uploaded_image is not None:
- st.image(uploaded_image, caption="Uploaded Image.", use_column_width=True)
- st.write("Generating caption...")
-
- # Preprocess the uploaded image
- image = Image.open(uploaded_image)
- if image.mode != "RGB":
- image = image.convert(mode="RGB")
-
- # Preprocess the image and generate caption
- pixel_values = feature_extractor(images=[image], return_tensors="pt").pixel_values
- output_ids = model.generate(pixel_values, **gen_kwargs)
-
- # Decode and display the caption
- caption = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
- caption = caption[0].strip()
- st.write(f"Caption: {caption}")
-
-if __name__ == "__main__":
- st.set_option('deprecation.showfileUploaderEncoding', False) # Disable file uploader encoding warning
- main()
\ No newline at end of file
diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Adobe Photoshop CC Crack Full Download With Serial Key For [PC Mac] What You Can Do With It.md b/spaces/tialenAdioni/chat-gpt-api/logs/Adobe Photoshop CC Crack Full Download With Serial Key For [PC Mac] What You Can Do With It.md
deleted file mode 100644
index 41f0e5d717a9685b01fc82d12cad603af0b23ee5..0000000000000000000000000000000000000000
--- a/spaces/tialenAdioni/chat-gpt-api/logs/Adobe Photoshop CC Crack Full Download With Serial Key For [PC Mac] What You Can Do With It.md
+++ /dev/null
@@ -1,80 +0,0 @@
-
-
Adobe Photoshop CC Crack Full Download With Serial Key For [PC Mac]
-
-
Are you looking for a way to get the full version of Adobe Photoshop CC for free? If yes, then you are in the right place. In this article, I will show you how to download and install Adobe Photoshop CC crack with serial key for your PC or Mac. But before that, let me tell you what Adobe Photoshop CC is and why you should use it.
-
-
What is Adobe Photoshop CC?
-
-
Adobe Photoshop CC is the most popular and powerful photo editing software in the world. It is used by millions of designers, photographers, artists, and hobbyists to create stunning images and graphics. Whether you want to retouch photos, design logos, create posters, or make illustrations, Adobe Photoshop CC can help you achieve your creative vision.
-
Adobe Photoshop CC Crack Full Download With Serial Key For [PC Mac]
Adobe Photoshop CC is part of the Adobe Creative Cloud suite, which means you can access it from any device with an internet connection. You can also sync your files and settings across different devices and collaborate with other users online. Adobe Photoshop CC also offers a variety of tools and features that make your work easier and faster. Some of them are:
-
-
Features and benefits of Adobe Photoshop CC
-
-
-
Content-Aware Fill: This feature allows you to remove unwanted objects or areas from your images and fill them with matching content from other parts of the image. You can also choose which pixels to use for filling and adjust them as you like.
-
Frame Tool: This tool lets you create shape or text frames that can act as placeholders for your images. You can drag and drop images into the frames and they will automatically scale to fit. You can also convert any shape or text into a frame.
-
Symmetry Mode: This mode lets you paint in perfect symmetry with different patterns and axes. You can choose from circular, radial, spiral, mandala, and other presets or create your own custom symmetry.
-
Stroke Smoothing: This feature helps you create smooth and polished strokes with your brush tools. You can adjust the level of smoothing and choose from different modes such as Pulled String, Stroke Catch Up, Catch Up On Stroke End, and Adjust For Zoom.
-
Preset Panels: These panels give you easy access to your brushes, gradients, patterns, shapes, swatches, styles, and fonts. You can also organize them into folders and subfolders and import or export them as you wish.
-
Camera Raw Filter: This filter lets you edit your raw images with the same tools and settings as in Adobe Camera Raw. You can adjust the exposure, contrast, color, clarity, noise reduction, lens correction, and more.
-
And many more: There are many other features and benefits of using Adobe Photoshop CC such as layer masks, adjustment layers, smart objects, filters, effects, text tools, vector tools, 3D tools, animation tools, etc.
-
-
-
How to download and install Adobe Photoshop CC for free
-
-
If you want to use all these features and benefits of Adobe Photoshop CC without paying anything, then you need to download and install Adobe Photoshop CC crack with serial key. A crack is a modified version of a software that bypasses its security and activation system. A serial key is a code that activates the software after installation. Here are the steps to follow:
-
-
Step 1: Download the crack file from a reliable source
-
-
The first step is to download the crack file for Adobe Photoshop CC from a reliable source on the internet. You can search for it on Google or use one of these links from web search results :
Make sure you download the file that matches your operating system (Windows or Mac) and your processor type (Intel or M1). Also, make sure you scan the file with an antivirus program before opening it.
-
-
Step 2: Extract the file and run the setup
-
-
The next step is to extract the file using a program like WinRAR or 7-Zip. You will get a folder containing the setup file and other files. Run the setup file as administrator and follow the instructions on the screen. Choose the destination folder where you want to install Adobe Photoshop CC and click on Install.
-
-
Step 3: Enter the serial key when prompted
-
-
The third step is to enter the serial key when prompted by the setup. You can find the serial key in one of these files in the folder:
-
-
-serial.txt
-keygen.exe
-patch.exe
-
-
-
Copy and paste the serial key into the setup window and click on Next. Wait for the installation to complete.
-
-
Step 4: Enjoy the full version of Adobe Photoshop CC
-
-
The final step is to enjoy the full version of Adobe Photoshop CC with all its features and benefits. You can launch it from your desktop or start menu shortcut. You can also check if it is activated by going to Help > About Photoshop.
-
-
Why you should use Adobe Photoshop CC crack
-
-
You might be wondering why you should use Adobe Photoshop CC crack instead of buying it from its official website or using other free alternatives. Here are some reasons why:
-
-
Pros of using Adobe Photoshop CC crack
-
-
-
You save money: The main advantage of using Adobe Photoshop CC crack is that you save money that you would otherwise spend on buying it or subscribing to it. The official price of Adobe Photoshop CC is $20.99 per month or $239.88 per year for a single app plan or $52.99 per month or $599.88 per year for an all apps plan. That's quite expensive for many people who just want to edit some photos or graphics occasionally.
-
You get all features: Another advantage of using Adobe Photoshop CC crack is that you get access to all its features and updates without any limitations or restrictions. You don't have to worry about missing out on any new tools or improvements that are added regularly by Adobe.
-
You have more freedom: A third advantage of using Adobe Photoshop CC crack is that you have more freedom to use it as you like without any interference from Adobe. You don't have to sign in with an account or connect to the internet to use it. You also don't have to worry about any ads or pop-ups that might annoy you while working.
-
-
-
Cons of using Adobe Photoshop CC crack
-
-
-
You risk malware infection: The main disadvantage of using Adobe Photoshop CC crack is that you risk infecting your computer with malware or viruses that might be hidden in the crack file or its source website. These malware might damage your system files, steal your personal information, or hijack your browser settings.
-
You violate copyright laws: Another disadvantage of using Adobe Photoshop CC crack is that you violate copyright laws that protect the intellectual property rights of Adobe and its developers. You might face legal consequences if you are caught using or distributing cracked software.
-
You miss out on customer support: A third disadvantage of using Adobe Photoshop CC crack is that you miss out on customer support from Adobe in case you encounter any problems or issues while 0a6ba089eb
-
-
\ No newline at end of file
diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Discover Edward Mayas New Songs in 2017 - Download and Share with Friends.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Discover Edward Mayas New Songs in 2017 - Download and Share with Friends.md
deleted file mode 100644
index 15e48f8613da0d8112eab4f8c977bd0a794b6ee1..0000000000000000000000000000000000000000
--- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Discover Edward Mayas New Songs in 2017 - Download and Share with Friends.md
+++ /dev/null
@@ -1,133 +0,0 @@
-
-
Edward Maya Songs Download 2017: How to Enjoy His Latest Hits
-
If you are a fan of electronic dance music (EDM), you have probably heard of Edward Maya, one of the most successful and talented artists in the genre. He is known for his catchy and uplifting songs that blend various styles such as trance, house, and pop. He has also collaborated with many other artists, such as Vika Jigulina, Timmy Trumpet, and Dimitri Vegas & Like Mike.
-
In this article, we will tell you more about Edward Maya and why you should listen to his songs. We will also show you how to download his songs in 2017, and how to enjoy them in different ways and occasions. So, if you are ready to discover the world of Edward Maya, read on!
Who is Edward Maya and why should you listen to his songs?
-
Edward Maya is a Romanian DJ, composer, and producer who was born in 1986. He started his musical career at a young age, studying piano and classical music at the Bucharest National University of Music. He also developed an interest in composing and producing music, especially EDM.
-
His background and achievements
-
Edward Maya rose to fame in 2009 with his hit single "Stereo Love", which featured the vocals of Vika Jigulina. The song became a global sensation, reaching the top charts in many countries and receiving millions of views on YouTube. It also won several awards, such as the Billboard Music Award for Top Dance Song in 2011.
-
Since then, Edward Maya has released many other successful singles and albums, such as "This Is My Life", "Desert Rain", "Love Story", "Harem", and "I Found You". He has also worked with many other artists, such as Akcent, Inna, Alexandra Stan, Costi Ionita, and Emilia. He has performed at many festivals and events around the world, such as Tomorrowland, Ultra Music Festival, Sensation White, and Sunburn Festival.
-
His musical style and influences
-
Edward Maya's musical style is characterized by his fusion of various genres, such as trance, house, pop, folk, and oriental music. He often uses instruments such as accordion, saxophone, guitar, piano, and violin to create a unique and melodic sound. He also incorporates vocals from different languages, such as English, Spanish, Arabic, Hindi, and Romanian.
-
Edward Maya's musical influences include artists such as Enigma, Deep Forest, ATB, Paul van Dyk, Armin van Buuren, Tiesto, David Guetta, Avicii, Calvin Harris, Martin Garrix, Kygo, and Alan Walker. He also draws inspiration from classical composers such as Mozart, Beethoven, Chopin, Bach, and Vivaldi.
-
edward maya stereo love mp3 download 2017
-edward maya best of music mix album 2017
-edward maya love in your eyes new single 2017
-edward maya feat vika jigulina songs 2017
-edward maya this is my life download 2017
-edward maya desert rain free mp3 download 2017
-edward maya and akcent new song 2017
-edward maya feeling official video 2017
-edward maya universal love lyrics 2017
-edward maya historia de amor download 2017
-edward maya mono in love remix 2017
-edward maya back home mp3 download 2017
-edward maya the sounds album download 2017
-edward maya next door song download 2017
-edward maya coturo ft lika mp3 download 2017
-edward maya happy for you lyrics 2017
-edward maya out of love song download 2017
-edward maya friends forever mp3 download 2017
-edward maya violet light love story download 2017
-edward maya colombian girl mp3 download 2017
-edward maya stay longer mp3 download 2017
-edward maya close your eyes lyrics 2017
-edward maya hold on song download 2017
-edward maya you and me mp3 download 2017
-edward maya angel of love mp3 download 2017
-edward maya vision of love mp3 download 2017
-edward maya i need you more lyrics 2017
-edward maya twin heart song download 2017
-edward maya beautiful life mp3 download 2017
-edward maya nostalgy song download 2017
-edward maya miracle of love mp3 download 2017
-edward maya run away song download 2017
-edward maya the other life mp3 download 2017
-edward maya my dreams miss you mp3 download 2017
-edward maya dancing for your life mp3 download 2017
-edward maya when you gonna stop breaking my heart lyrics 2017
-edward maya that's my name song download 2017
-edward maya i can't stop missing you mp3 download 2017
-edward maya where are you now lyrics 2017
-edward maya i'm falling in love song download 2017
-edward maya don't wanna miss you mp3 download 2017
-edward maya the rhythm of love lyrics 2017
-edward maya i'll do anything for you song download 2017
-
His popular songs and collaborations
-
Some of Edward Maya's most popular songs are:
-
-
"Stereo Love" (feat. Vika Jigulina): This is his debut single and his most famous song. It has over 400 million views on YouTube and has been remixed by many other DJs.
-
"This Is My Life" (feat. Vika J
-
"Desert Rain" (feat. Vika Jigulina): This is his third single and a romantic song. It has over 80 million views on YouTube and has a beautiful melody and lyrics.
-
"Love Story" (feat. Violet Light): This is his fourth single and a ballad song. It has over 40 million views on YouTube and has a soothing and emotional sound.
-
"Harem" (feat. Emilia): This is his fifth single and an oriental song. It has over 20 million views on YouTube and has a exotic and energetic vibe.
-
"I Found You" (feat. Timmy Trumpet): This is his latest single and a collaboration with the Australian DJ and trumpet player. It has over 10 million views on YouTube and has a powerful and uplifting beat.
-
-
Some of Edward Maya's most notable collaborations are:
-
-
"Mono in Love" (with Vika Jigulina): This is a song from his second album "The Stereo Love Show". It has over 30 million views on YouTube and has a catchy and upbeat sound.
-
"Universal Love" (with Andrea & Costi): This is a song from his third album "Angels". It has over 20 million views on YouTube and has a dance and pop sound.
-
"Colombian Girl" (with Mayavin Show): This is a song from his fourth album "Miracle". It has over 10 million views on YouTube and has a latin and reggaeton sound.
-
"Feeling" (with Dimitri Vegas & Like Mike): This is a song from his fifth album "The DJ Is Mine". It has over 5 million views on YouTube and has a big room and electro sound.
-
"Historia de Amor" (with Akcent & Lidia Buble): This is a song from his sixth album "Love in Your Eyes". It has over 2 million views on YouTube and has a spanish and pop sound.
-
-
How to download Edward Maya songs in 2017?
-
If you want to download Edward Maya songs in 2017, you have many options to choose from. There are many platforms and websites that offer his songs for free or for a fee. However, not all of them are reliable, legal, or safe. Therefore, you need to be careful and selective when you download his songs.
-
The best platforms and websites to find his songs
-
Some of the best platforms and websites to find Edward Maya songs are:
-
Wynk Music
-
Wynk Music is an online music streaming service that offers over 6 million songs from various genres, artists, and languages. You can listen to Edward Maya songs on Wynk Music for free with ads, or for a monthly subscription fee without ads. You can also download his songs for offline listening with the subscription plan. Wynk Music is available for Android, iOS, Windows, and Web devices.
-
Boomplay
-
Boomplay is an online music streaming service that offers over 50 million songs from various genres, artists, and languages. You can listen to Edward Maya songs on Boomplay for free with ads, or for a monthly subscription fee without ads. You can also download his songs for offline listening with the subscription plan or with coins that you can earn by watching ads or completing tasks. Boomplay is available for Android, iOS, Windows, and Web devices.
-
YouTube
-
YouTube is an online video sharing platform that offers billions of videos from various categories, topics, and creators. You can watch Edward Maya songs on YouTube for free with ads, or for a monthly subscription fee without ads with YouTube Premium. You can also download his songs for offline viewing with YouTube Premium or with third-party tools that allow you to convert YouTube videos to MP3 files. However, this may not be legal or safe depending on the source and the quality of the files. YouTube is available for Android, iOS, Windows, Web, Smart TV, and other devices.
-
The benefits and drawbacks of downloading his songs
-
Some of the benefits and drawbacks of downloading Edward Maya songs are:
-
Pros: convenience, quality, offline access, etc.
-
-
Downloading his songs allows you to enjoy them anytime and anywhere without relying on an internet connection or using up your data.
-
Downloading his songs allows you to choose the quality and format of the files according to your preference and device compatibility.
Downloading his songs allows you to create your own playlists and albums and organize them according to your taste and mood.
-
-
Cons: storage space, legality, internet speed, etc.
-
-
Downloading his songs requires you to have enough storage space on your device or external memory card. This may limit the number of songs you can download or affect the performance of your device.
-
Downloading his songs may not be legal or ethical depending on the source and the license of the files. You may violate the copyright or the terms of service of the platform or website that offers his songs.
-
Downloading his songs may take a long time or fail depending on your internet speed and connection stability. You may also incur additional charges if you use mobile data or a public Wi-Fi network.
-
-
How to enjoy Edward Maya songs in 2017?
-
If you have downloaded Edward Maya songs in 2017, you may wonder how to enjoy them in the best possible way. There are many ways to listen to his songs and many occasions to play them. Here are some tips and suggestions for you:
-
The best ways to listen to his songs
-
Some of the best ways to listen to Edward Maya songs are:
-
Headphones or speakers
-
You can choose between headphones or speakers depending on your preference and situation. Headphones are ideal if you want to immerse yourself in his music and block out any external noise. Speakers are ideal if you want to share his music with others and create a lively atmosphere.
-
Playlists or albums
-
You can choose between playlists or albums depending on your mood and time. Playlists are ideal if you want to mix and match his songs with other artists and genres that you like. Albums are ideal if you want to listen to his songs in chronological order and appreciate his musical evolution.
-
Parties or solo sessions
-
You can choose between parties or solo sessions depending on your social and emotional state. Parties are ideal if you want to dance and have fun with your friends and family. Solo sessions are ideal if you want to relax and unwind by yourself.
-
The best occasions to listen to his songs
-
Some of the best occasions to listen to Edward Maya songs are:
-
Traveling or relaxing
-
You can listen to his songs when you are traveling or relaxing, as they can enhance your experience and mood. His songs can make you feel adventurous and excited when you are exploring new places, or calm and peaceful when you are resting at home.
-
Working or studying
-
You can listen to his songs when you are working or studying, as they can boost your productivity and concentration. His songs can make you feel motivated and energized when you are tackling a challenging task, or focused and creative when you are learning something new.
-
Dancing or romancing
-
You can listen to his songs when you are dancing or romancing, as they can spice up your relationship and intimacy. His songs can make you feel passionate and sensual when you are with your partner, or happy and free when you are by yourself.
-
Conclusion
-
In conclusion, Edward Maya is a talented and successful EDM artist with many amazing songs that you can download and enjoy in 2017. He has a unique and diverse musical style that blends various genres, instruments, and languages, and he has collaborated with many other artists and performed at events around the world. You can find his songs on various platforms and websites, but you need to be careful about the quality, legality, and safety of the files. You can also listen to his songs in different ways and on different occasions, depending on your preference, mood, and situation. Edward Maya's songs can make you feel a range of emotions and sensations, such as excitement, peace, motivation, passion, and happiness. So, what are you waiting for? Download his songs now and enjoy them!
-
Frequently Asked Questions
-
Here are some frequently asked questions about Edward Maya's songs:
-
-
What is Edward Maya's real name?
-
Edward Maya's real name is Eduard Marian Ilie.
-
What is Edward Maya's net worth?
-
Edward Maya's net worth is estimated at around $10 million as of 2021.
-
What is Edward Maya's latest album?
-
Edward Maya's latest album is "Love in Your Eyes", which was released in 2020.
-
What is Edward Maya's most viewed song on YouTube?
-
Edward Maya's most viewed song on YouTube is "Stereo Love", which has over 400 million views as of 2021.
-
Who is Vika Jigulina and how is she related to Edward Maya?
-
Vika Jigulina is a Moldovan-Romanian singer and DJ who is best known for her collaboration with Edward Maya on several songs, such as "Stereo Love", "This Is My Life", "Desert Rain", and "Mono in Love". She is also his former girlfriend.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Download Buku Puji Syukur Katolik Pdf 21 LINK.md b/spaces/tioseFevbu/cartoon-converter/scripts/Download Buku Puji Syukur Katolik Pdf 21 LINK.md
deleted file mode 100644
index 6bd4b1cff8819a9f4f55017d1e1943f42624e3bb..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/Download Buku Puji Syukur Katolik Pdf 21 LINK.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-I can try to write a title and an article with SEO optimization and HTML formatting for the keyword "download buku puji syukur katolik pdf 21". Here is what I came up with:
-
-
How to Download Buku Puji Syukur Katolik PDF 21 for Free
-
Buku Puji Syukur Katolik is a popular Catholic hymnal book in Indonesia. It contains more than 600 songs and prayers for various occasions and liturgical seasons. If you are looking for a way to download Buku Puji Syukur Katolik PDF 21 for free, you have come to the right place.
-
In this article, we will show you how to download Buku Puji Syukur Katolik PDF 21 for free from a reliable and safe source. You will also learn how to use the PDF file on your computer or mobile device. By the end of this article, you will be able to enjoy the beautiful and inspiring songs and prayers from Buku Puji Syukur Katolik anytime and anywhere.
Buku Puji Syukur Katolik PDF 21 is the latest edition of the Catholic hymnal book in Indonesia. It was published in 2021 by the Indonesian Bishops' Conference (KWI) and the Liturgical Commission (KOLIT). It contains new songs and prayers that reflect the current situation and needs of the Indonesian Catholic community.
-
Some of the benefits of downloading Buku Puji Syukur Katolik PDF 21 are:
-
-
You can access the book anytime and anywhere without carrying a physical copy.
-
You can save money and time by not having to buy or borrow the book from a bookstore or a library.
-
You can easily search for your favorite songs and prayers by using the search function in your PDF reader.
-
You can print or share the book with your friends and family who also want to use it.
-
You can support the Indonesian Catholic Church by using the official and authorized version of the book.
-
-
How to Download Buku Puji Syukur Katolik PDF 21 for Free?
-
To download Buku Puji Syukur Katolik PDF 21 for free, you need to follow these simple steps:
-
-
Visit the official website of the Indonesian Bishops' Conference (KWI) at https://www.kwi.or.id/.
-
Click on the "Publikasi" tab on the top menu and select "Buku Liturgi" from the drop-down list.
-
Scroll down until you find the cover image of Buku Puji Syukur Katolik PDF 21 and click on it.
-
You will be redirected to a page where you can see more details about the book, such as the table of contents, the introduction, and the sample pages.
-
Click on the "Download" button at the bottom of the page and enter your name and email address in the pop-up form.
-
Check your email inbox for a confirmation message from KWI. Click on the link provided in the message to verify your email address and start downloading the book.
-
The download process may take a few minutes depending on your internet speed. Once it is done, you will have Buku Puji Syukur Katolik PDF 21 saved on your computer or mobile device.
-
-
How to Use Buku Puji Syukur Katolik PDF 21?
-
After downloading Buku Puji Syukur Katolik PDF 21, you can use it in various ways depending on your preference and convenience. Here are some suggestions:
-
-
You can open the PDF file on your computer or mobile device using any PDF reader application. You can adjust the zoom level, font size, brightness, and orientation to suit your reading comfort.
-
You can print out some or all pages of the book if you want to have a hard copy. You can also bind them together or put them in a folder for easy access.
-
You can share the PDF file with your friends and family who also want to use it. You can send it via email, social media, cloud storage, or Bluetooth. However, please respect the copyright of KWI and KOLIT and do not distribute it for commercial purposes.
- 7196e7f11a
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack !LINK!.md b/spaces/tioseFevbu/cartoon-converter/scripts/JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack !LINK!.md
deleted file mode 100644
index 0c8a279ab4fc50e1db3206da876e378bdf69b182..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack !LINK!.md
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
How to Download and Install JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack
-
JetBrains IntelliJ IDEA is one of the most popular and powerful integrated development environments (IDEs) for Java and other programming languages. It offers a rich set of features, such as code completion, refactoring, debugging, testing, version control, and more. However, the Ultimate edition of IntelliJ IDEA is not free and requires a license to use.
-
If you want to try out the full potential of IntelliJ IDEA Ultimate without paying for a license, you can download and install a cracked version of it from the internet. However, this is not recommended as it may contain malware, viruses, or other harmful software that can damage your computer or compromise your data. Moreover, using a cracked version of IntelliJ IDEA is illegal and unethical, as it violates the terms and conditions of JetBrains.
-
Therefore, if you decide to download and install JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack, you do so at your own risk and responsibility. We do not endorse or support any illegal or unauthorized use of software. This article is for educational purposes only.
-
Steps to Download and Install JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack
-
-
Go to this link [^1^] and download the torrent file for JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack. You will need a torrent client such as uTorrent or BitTorrent to download the file.
-
Open the torrent file with your torrent client and start downloading the file. It is about 283 MB in size.
-
Once the download is complete, extract the file using a tool such as WinRAR or 7-Zip. You will get a folder named JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT.
-
Open the folder and run the file named ideaIU-13.1.4b.exe. This will start the installation process of IntelliJ IDEA Ultimate.
-
Follow the instructions on the screen and choose the options that suit your preferences. You can also customize the installation path and components.
-
When the installation is finished, do not run IntelliJ IDEA yet. Instead, go back to the extracted folder and run the file named JetBrains_KeyMaker.jar. This will open a keygen tool that will generate a license key for IntelliJ IDEA Ultimate.
-
Select IntelliJ IDEA Ultimate from the drop-down menu and click on Generate. You will get a license key block that starts with -----BEGIN LICENSE----- and ends with -----END LICENSE-----.
-
Copy the license key and go back to the installation folder of IntelliJ IDEA Ultimate. Open the bin folder and run the file named idea64.exe. This will launch IntelliJ IDEA Ultimate.
-
When prompted to enter a license key, paste the license key that you copied from the keygen tool and click on Activate.
-
Congratulations! You have successfully installed JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack. You can now enjoy all the features of IntelliJ IDEA Ultimate for free.
-
-
Disclaimer
-
This article is for educational purposes only. We do not endorse or support any illegal or unauthorized use of software. Downloading and installing JetBrains IntelliJ IDEA Ultimate V13 1 4 Incl KeyMaker-DVT Crack may expose you to malware, viruses, or other harmful software that can damage your computer or compromise your data. Moreover, using a cracked version of IntelliJ IDEA is illegal and unethical, as it violates the terms and conditions of JetBrains. If you like IntelliJ IDEA and want to use it legally, please purchase a license from the official website.
- 81aa517590
-
-
\ No newline at end of file
diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/KMPlayer 4.2.2.29 Crack.md b/spaces/tioseFevbu/cartoon-converter/scripts/KMPlayer 4.2.2.29 Crack.md
deleted file mode 100644
index 56444db683e4daac000c625ffc60c4d627eda3cc..0000000000000000000000000000000000000000
--- a/spaces/tioseFevbu/cartoon-converter/scripts/KMPlayer 4.2.2.29 Crack.md
+++ /dev/null
@@ -1,123 +0,0 @@
-
-
KMPlayer 4.2.2.29 Crack: A Versatile Media Player with Many Features
-
If you are looking for a media player that can handle various types of media files without requiring additional codecs, then you might want to try KMPlayer 4.2.2.29 Crack. This is a powerful and flexible media player that supports many formats, such as VCD, DVD, AVI, MKV, MP4, WMV, FLV, OGG, RM, and more. It also supports 3D, 4K, 8K, UHD, and 60FPS video playback, as well as high-quality audio output.
In this article, we will show you what KMPlayer is, how to download and install it on your PC, how to use its features, and what are its pros and cons. We will also answer some frequently asked questions about KMPlayer 4.2.2.29 Crack.
-
What is KMPlayer?
-
A brief introduction to KMPlayer and its history
-
KMPlayer is a multimedia player that was developed by Kang Yong-Huee in 2002 and acquired by Pandora TV in 2007. It is one of the most popular media players in the world, with over 300 million users. It is available for Windows, Android, and iOS devices.
-
KMPlayer stands for K-Multimedia Player or Korean Media Player. It is designed to be a versatile and customizable media player that can play almost any media file without needing external codecs or filters. It has its own internal codecs that support various formats and can also use external codecs if needed.
-
-
The main features and benefits of KMPlayer
-
KMPlayer has many features that make it a great choice for media playback. Some of them are:
-
-
It can play all high-quality video formats, including 4K, 8K, UHD, 60FPS. It also supports 3D video playback with various modes and settings.
-
It can play various audio formats, such as MP3, AAC, OGG, WMA, FLAC, etc., and supports multiple audio streams. It also has a matrix normalizer function that adjusts the sound quality when using an internal sound codec.
-
It can play online videos from YouTube, Instagram, Dailymotion, etc., by using their URLs. It can also stream radio stations from various sources.
-
It can capture audio, video, or screenshots in different formats (including GIF) and save them on your PC. You can also edit the captured files with various tools and effects.
-
It has a user-friendly interface that can be customized with skins and themes. You can also change the layout, size, position, color, font, etc., of the player window and controls.
-
It has a variety of options and settings that allow you to adjust the playback speed, aspect ratio, subtitle size, brightness, contrast, etc., according to your preference.
-
It supports various keyboard shortcuts and mouse gestures that make it easy to control the player. You can also use a remote control app on your smartphone to control the player.
-
It has a built-in video editor that lets you cut, crop, rotate, merge, or split video files. You can also add filters, transitions, effects, text, etc., to enhance your videos.
-
-
As you can see, KMPlayer is a feature-rich media player that can satisfy your needs for media playback. However, to enjoy all these features, you need to download and install KMPlayer 4.2.2.29 Crack on your PC.
-
How to Download and Install KMPlayer 4.2.2.29 Crack?
-
The steps to download KMPlayer from the official website or other sources
-
The easiest way to download KMPlayer is to visit its official website and click on the download button. You will be redirected to a page where you can choose the version of KMPlayer that suits your system (Windows 32-bit or 64-bit). You can also select the language of the installer from the drop-down menu.
-
Alternatively, you can download KMPlayer from other sources, such as file-sharing websites or torrent sites. However, be careful when downloading from these sources, as they may contain malware or viruses that can harm your PC. Always scan the downloaded file with an antivirus program before opening it.
-
The steps to install KMPlayer on your PC
-
Once you have downloaded the KMPlayer installer file, you can follow these steps to install it on your PC:
-
-
Double-click on the installer file and click on "Yes" when prompted by User Account Control.
-
Select the language of the installation and click on "Next".
-
Read and accept the terms of the license agreement and click on "Next".
-
Choose the destination folder where you want to install KMPlayer and click on "Next".
-
Select the components that you want to install and click on "Next". You can choose to install additional codecs, skins, languages, etc., or skip them if you don't need them.
-
Click on "Install" and wait for the installation process to complete.
-
Click on "Finish" and launch KMPlayer.
-
-
Congratulations! You have successfully installed KMPlayer on your PC. Now you can enjoy playing various media files with it. However, if you want to unlock all the features of KMPlayer, you need to crack it and activate its full version.
-
The steps to crack KMPlayer and activate its full version
-
To crack KMPlayer and activate its full version, you need to download a crack file from a reliable source. A crack file is a small program that modifies the original software code and bypasses its security features. By using a crack file, you can access all the features of KMPlayer without paying for it.
-
However, cracking software is illegal and unethical, as it violates the intellectual property rights of the developers. It also exposes your PC to potential risks, such as malware infection or data loss. Therefore, we do not recommend cracking software and advise you to use it at your own risk.
-
If you still want to crack KMPlayer and activate its full version, you can follow these steps:
-
-
Download a crack file for KMPlayer 4.2.2.29 from a trusted source. Make sure that the crack file is compatible with your system and version of KMPlayer.
-
Extract the crack file using a program like WinRAR or 7-Zip.
-
Copy the crack file and paste it into the installation folder of KMPlayer. You can find the installation folder by right-clicking on the KMPlayer shortcut on your desktop and selecting "Open file location".
-
Replace the original file with the crack file when prompted.
-
Run KMPlayer as administrator and enjoy its full version.
-
-
You have successfully cracked KMPlayer and activated its full version. Now you can use all its features without any limitations or restrictions.
-
How to Use KMPlayer 4.2.2.29 Crack?
-
The basic functions and settings of KMPlayer
-
KMPlayer is easy to use and has a simple interface that allows you to access its functions and settings with ease. Here are some of the basic functions and settings of KMPlayer:
-
-
To open a media file with KMPlayer, you can either drag and drop it onto the player window, or click on the "Open File" button on the top left corner, or press Ctrl+O on your keyboard. You can also open a folder, a disc, or a URL with KMPlayer.
-
To play, pause, stop, or skip a media file, you can use the buttons on the bottom of the player window, or press the spacebar, Esc, or arrow keys on your keyboard. You can also use the mouse wheel to adjust the volume or seek the playback position.
-
To access the menu of KMPlayer, you can right-click on the player window and select the option you want. You can also press F2 on your keyboard to open the preferences window, where you can change various settings of KMPlayer.
-
To switch between different modes of KMPlayer, such as full screen, compact, or skin mode, you can click on the buttons on the top right corner of the player window, or press F4, F5, or F6 on your keyboard. You can also customize the appearance of KMPlayer with different skins and themes.
-
-
These are some of the basic functions and settings of KMPlayer that you can use to play media files. However, KMPlayer has many more features that you can explore and use to enhance your media experience.
-
How to play various media formats with KMPlayer
-
KMPlayer can play almost any media format that you can think of, thanks to its internal and external codecs. Here are some examples of how to play various media formats with KMPlayer:
-
-
To play a video file with KMPlayer, you can simply open it with KMPlayer and enjoy it. You can also adjust the video quality, aspect ratio, zoom level, brightness, contrast, etc., by using the menu options or keyboard shortcuts. You can also enable or disable subtitles, change their size and position, or load external subtitle files.
-
To play an audio file with KMPlayer, you can also open it with KMPlayer and listen to it. You can also adjust the audio quality, volume level, balance, equalizer, etc., by using the menu options or keyboard shortcuts. You can also enable or disable visualizations, change their type and color, or load external visualization files.
-
To play a 3D video file with KMPlayer, you need to have a 3D display device and glasses. You can then open the 3D video file with KMPlayer and select the 3D mode from the menu. You can choose from various 3D modes, such as side by side, top and bottom, anaglyph, etc., depending on your 3D device and preference. You can also adjust the 3D depth and convergence by using the menu options or keyboard shortcuts.
-
To play a 4K video file with KMPlayer, you need to have a 4K display device and a powerful PC. You can then open the 4K video file with KMPlayer and enjoy the high-resolution and high-quality video. You can also adjust the 4K video settings, such as scaling, sharpening, deinterlacing, etc., by using the menu options or keyboard shortcuts.
-
-
These are some examples of how to play various media formats with KMPlayer. You can also play other media formats, such as DVD, Blu-ray, CD, ISO, etc., with KMPlayer. You can also play online videos or radio stations with KMPlayer by using their URL.
-
How to capture audio, video, and screenshots with KMPlayer
-
KMPlayer also has a built-in capture function that lets you record audio, video, or screenshots from the media files that you are playing. You can use this function to save your favorite moments or scenes from the media files. Here are the steps to capture audio, video, and screenshots with KMPlayer:
-
-
Open the media file that you want to capture with KMPlayer and play it.
-
To capture audio, click on the "Capture" button on the bottom of the player window and select "Audio Capture". You can also press Ctrl+Alt+A on your keyboard. A window will pop up where you can choose the format, quality, and destination of the audio file. Click on "Start" to begin capturing audio and click on "Stop" to end it.
-
To capture video, click on the "Capture" button on the bottom of the player window and select "Video Capture". You can also press Ctrl+Alt+V on your keyboard. A window will pop up where you can choose the format, quality, size, frame rate, and destination of the video file. Click on "Start" to begin capturing video and click on "Stop" to end it.
-
To capture screenshots, click on the "Capture" button on the bottom of the player window and select "Screen Capture". You can also press Ctrl+Alt+C on your keyboard. A window will pop up where you can choose the format, quality, size, and destination of the image file. Click on "Capture" to take a screenshot of the current frame.
-
-
You can find the captured files in the destination folder that you have chosen. You can also edit them with KMPlayer's built-in editor or other programs.
-
What are the Pros and Cons of KMPlayer 4.2.2.29 Crack?
-
The advantages of using KMPlayer over other media players
-
KMPlayer is a versatile and powerful media player that has many advantages over other media players. Some of them are:
-
-
It supports a wide range of media formats without requiring additional codecs or filters.
-
It has many features and functions that enhance the media playback experience.
-
It has a user-friendly and customizable interface that suits different preferences and tastes.
-
It has low CPU and memory usage, so it does not affect the performance of your PC.
-
It is free to download and use for personal use.
-
-
The disadvantages or limitations of using KMPlayer
-
KMPlayer is not perfect and has some disadvantages or limitations that you should be aware of before using it. Some of them are:
-
-
It may not be compatible with some media formats or devices that require specific codecs or drivers.
-
It may have some bugs or errors that cause crashes or freezes during playback.
-
It may have some security vulnerabilities that expose your PC to malware or hackers.
-
It may have some legal issues that violate the intellectual property rights of the developers or publishers.
-
It may have some ethical issues that harm the development and innovation of the software industry.
-
-
Conclusion
-
KMPlayer 4.2.2.29 Crack is a versatile and powerful media player that can play almost any media format without requiring additional codecs or filters. It also has many features and functions that enhance the media playback experience. However, it also has some disadvantages or limitations that you should be aware of before using it. Moreover, cracking software is illegal and unethical, as it violates the intellectual property rights of the developers and harms the software industry. Therefore, we do not recommend cracking software and advise you to use it at your own risk.
-
If you want to enjoy all the features of KMPlayer without any limitations or risks, you should buy its official version from its website or authorized distributors. You will also get technical support and updates from the developers. You will also support the development and innovation of the software industry. You will also enjoy a better and safer media playback experience with KMPlayer.
-
FAQs
-
Here are some frequently asked questions about KMPlayer 4.2.2.29 Crack:
-
Q: Is KMPlayer safe to use?
-
A: KMPlayer is safe to use if you download it from its official website or other trusted sources. However, if you download it from untrusted sources or crack it with a crack file, you may expose your PC to malware or hackers. Therefore, you should always scan the downloaded file with an antivirus program before opening it.
-
Q: Is KMPlayer legal to use?
-
A: KMPlayer is legal to use for personal use if you download it from its official website or other authorized distributors. However, if you download it from untrusted sources or crack it with a crack file, you may violate the intellectual property rights of the developers or publishers. Therefore, you should respect the license agreement and terms of use of KMPlayer and avoid cracking software.
-
Q: Is KMPlayer better than VLC?
-
A: KMPlayer and VLC are both popular and powerful media players that can play various media formats without requiring additional codecs or filters. They also have many features and functions that enhance the media playback experience. However, they have some differences in terms of compatibility, performance, interface, customization, etc., that may suit different preferences and needs. Therefore, you should try both of them and see which one works better for you.
-
Q: How to update KMPlayer?
-
A: To update KMPlayer, you can either check for updates manually or enable automatic updates. To check for updates manually, you can click on the "Help" button on the top right corner of the player window and select "Check for Updates". You will be notified if there is a new version available and you can download and install it. To enable automatic updates, you can click on the "Help" button on the top right corner of the player window and select "Preferences". Then, go to "General" and check the box that says "Check for updates automatically". You will be notified when there is a new version available and you can choose to download and install it or not.
-
Q: How to uninstall KMPlayer?
-
A: To uninstall KMPlayer, you can follow these steps:
-
-
Close KMPlayer if it is running.
-
Go to the Control Panel and select "Programs and Features".
-
Find KMPlayer from the list of programs and click on "Uninstall".
-
Follow the instructions on the screen to complete the uninstallation process.
-
Delete any leftover files or folders of KMPlayer from your PC.
-
-
You have successfully uninstalled KMPlayer from your PC.
b2dd77e56b
-
-
\ No newline at end of file
diff --git a/spaces/tomofi/ABINet-OCR/app.py b/spaces/tomofi/ABINet-OCR/app.py
deleted file mode 100644
index 463a2cf5b489636f3020d63e91db48b5efda5183..0000000000000000000000000000000000000000
--- a/spaces/tomofi/ABINet-OCR/app.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import os
-os.system('pip install --upgrade gdown')
-import gdown
-gdown.download(id='1mYM_26qHUom_5NU7iutHneB_KHlLjL5y', output='workdir.zip')
-os.system('unzip workdir.zip')
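-# The archive above is expected to contain the pretrained ABINet checkpoint
-# that is loaded below from workdir/train-abinet/best-train-abinet.pth.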
-
-import glob
-import gradio as gr
-from demo import get_model, preprocess, postprocess, load
-from utils import Config, Logger, CharsetMapper
-
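-# Note: the config, model weights, and charset mapper below are rebuilt on every
-# call to process_image; loading them once at module scope would make repeated
-# requests faster.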
-def process_image(image):
- config = Config('configs/train_abinet.yaml')
- config.model_vision_checkpoint = None
- model = get_model(config)
- model = load(model, 'workdir/train-abinet/best-train-abinet.pth')
- charset = CharsetMapper(filename=config.dataset_charset_path, max_length=config.dataset_max_length + 1)
-
- img = image.convert('RGB')
- img = preprocess(img, config.dataset_image_width, config.dataset_image_height)
- res = model(img)
- return postprocess(res, charset, 'alignment')[0][0]
-
-title = "Interactive demo: ABINet"
-description = "Demo for ABINet, ABINet uses a vision model and an explicit language model to recognize text in the wild, which are trained in end-to-end way. The language model (BCN) achieves bidirectional language representation in simulating cloze test, additionally utilizing iterative correction strategy. To use it, simply upload a (single-text line) image or use one of the example images below and click 'submit'. Results will show up in a few seconds."
-article = "