diff --git a/spaces/101-5/gpt4free/app.py b/spaces/101-5/gpt4free/app.py
deleted file mode 100644
index 23e3a59d76381e8f30904722f52f1ac57285a006..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/app.py
+++ /dev/null
@@ -1,172 +0,0 @@
-import g4f
-import gradio as gr
-from g4f.Provider import (
- Ails,
- You,
- Bing,
- Yqcloud,
- Theb,
- Aichat,
- Bard,
- Vercel,
- Forefront,
- Lockchat,
- Liaobots,
- H2o,
- ChatgptLogin,
- DeepAi,
- GetGpt
-)
-import os
-import json
-import pandas as pd
-
-from models_for_langchain.model import CustomLLM
-from langchain.memory import ConversationBufferWindowMemory, ConversationTokenBufferMemory
-from langchain import LLMChain, PromptTemplate
-from langchain.prompts import (
- ChatPromptTemplate,
- PromptTemplate,
- SystemMessagePromptTemplate,
- AIMessagePromptTemplate,
- HumanMessagePromptTemplate,
-)
-
-provider_dict = {
- 'Ails': Ails,
- 'You': You,
- 'Bing': Bing,
- 'Yqcloud': Yqcloud,
- 'Theb': Theb,
- 'Aichat': Aichat,
- 'Bard': Bard,
- 'Vercel': Vercel,
- 'Forefront': Forefront,
- 'Lockchat': Lockchat,
- 'Liaobots': Liaobots,
- 'H2o': H2o,
- 'ChatgptLogin': ChatgptLogin,
- 'DeepAi': DeepAi,
- 'GetGpt': GetGpt
-}
-
-prompt_set_list = {}
-for prompt_file in os.listdir("prompt_set"):
- key = prompt_file
- if '.csv' in key:
- df = pd.read_csv("prompt_set/" + prompt_file)
- prompt_dict = dict(zip(df['act'], df['prompt']))
- else:
- with open("prompt_set/" + prompt_file, encoding='utf-8') as f:
- ds = json.load(f)
- prompt_dict = {item["act"]: item["prompt"] for item in ds}
- prompt_set_list[key] = prompt_dict
-
-with gr.Blocks() as demo:
- llm = CustomLLM()
-
- template = """
- Chat with human based on following instructions:
- ```
- {system_instruction}
- ```
- The following is a conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
- {{chat_history}}
- Human: {{human_input}}
- Chatbot:"""
-
- memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
-
- chatbot = gr.Chatbot([], label='AI')
- msg = gr.Textbox(value="", label='请输入:')
- with gr.Row():
- clear = gr.Button("清空对话", scale=2)
- chat_mode = gr.Checkbox(value=True, label='聊天模式', interactive=True, scale=1)
- system_msg = gr.Textbox(value="你是一名助手,可以解答问题。", label='系统提示')
- with gr.Row():
- default_prompt_set = "1 中文提示词.json"
- prompt_set_name = gr.Dropdown(prompt_set_list.keys(), value=default_prompt_set, label='提示词集合')
- prompt_name = gr.Dropdown(prompt_set_list[default_prompt_set].keys(), label='提示词', min_width=20)
- with gr.Row():
- model_name = gr.Dropdown(['gpt-3.5-turbo', 'gpt-4'], value='gpt-3.5-turbo', label='模型')
- provider_name = gr.Dropdown(provider_dict.keys(), value='GetGpt', label='提供者', min_width=20)
-
- def change_prompt_set(prompt_set_name):
- return gr.Dropdown.update(choices=list(prompt_set_list[prompt_set_name].keys()))
-
- def change_prompt(prompt_set_name, prompt_name):
- return gr.update(value=prompt_set_list[prompt_set_name][prompt_name])
-
- def user(user_message, history = []):
- return gr.update(value="", interactive=False), history + [[user_message, None]]
-
- def bot(history, model_name, provider_name, system_msg, chat_mode):
- history[-1][1] = ''
- if len(system_msg)>3000:
- system_msg = system_msg[:2000] + system_msg[-1000:]
-
- if not chat_mode:
- global template, memory
- llm.model_name = model_name
- llm.provider_name = provider_name
- prompt = PromptTemplate(
- input_variables=["chat_history", "human_input"], template=template.format(system_instruction=system_msg)
- )
- llm_chain = LLMChain(
- llm=llm,
- prompt=prompt,
- verbose=False,
- memory=memory,
- )
- bot_msg = llm_chain.run(history[-1][0])
- for c in bot_msg:
- history[-1][1] += c
- yield history
- else:
- prompt = """
- 请你仔细阅读以下提示,然后针对用户的话进行回答。
- 提示:
- ```
- {}
- ```
- 用户最新的话:
- ```
- {}
- ```
- 请回答:
- """
-
- # print(history)
- messages = []
- for user_message, assistant_message in history[:-1]:
- messages.append({"role": "user", "content": user_message})
- messages.append({"role": "assistant", "content": assistant_message})
- messages.append({"role": "user", "content": history[-1][0]})
- # print(messages)
-
- bot_msg = g4f.ChatCompletion.create(
- model=model_name,
- provider=provider_dict[provider_name],
- messages=messages,
- stream=True)
- for c in bot_msg:
- history[-1][1] += c
- print(c, flush=True, end='')
- yield history
-
- def empty_chat():
- global memory
- memory = ConversationBufferWindowMemory(k=10, memory_key="chat_history")
- return None
- response = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
- bot, [chatbot, model_name, provider_name, system_msg, chat_mode], chatbot
- )
- prompt_set_name.select(change_prompt_set, prompt_set_name, prompt_name)
- prompt_name.select(change_prompt, [prompt_set_name, prompt_name], system_msg)
-
- response.then(lambda: gr.update(interactive=True), None, [msg], queue=False)
- clear.click(empty_chat, None, [chatbot], queue=False)
-
-demo.title = "AI Chat"
-demo.queue()
-demo.launch()
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ArcGIS 10.8 Full Crack Kuyhaa - A Powerful and Easy-to-Use GIS Software for Your PC.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ArcGIS 10.8 Full Crack Kuyhaa - A Powerful and Easy-to-Use GIS Software for Your PC.md
deleted file mode 100644
index bf2afbc972824af6e9a169057b10f692d563899a..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/ArcGIS 10.8 Full Crack Kuyhaa - A Powerful and Easy-to-Use GIS Software for Your PC.md
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
ArcGIS 10.8 is a popular and widely used geographic information system (GIS) software that allows you to work with maps and spatial data in various forms and formats. With ArcGIS 10.8, you can create, edit, and display spatial data, as well as perform spatial analysis, data management, visualization, and geoprocessing.
-However, ArcGIS 10.8 is not a free software and requires a license to use it. If you want to use ArcGIS 10.8 for free, you can download the full crack version from Kuyhaa, a website that provides various software and games for free. In this article, we will show you how to download ArcGIS 10.8 full crack Kuyhaa and use it for your GIS tasks.
-DOWNLOAD ››› https://byltly.com/2uKyxs
Downloading ArcGIS 10.8 full crack Kuyhaa is easy and fast. Just follow these steps:
-Using ArcGIS 10.8 for GIS tasks is fun and easy. Here are some basic steps to get you started:
-ArcGIS 10.8 has many advanced features that can help you enhance your GIS skills and productivity. Here are some tips and tricks for using ArcGIS 10.8:
-Have you ever wanted to get back at your annoying neighbour for making your life miserable? If so, you might enjoy playing Neighbours From Hell 6, a comedy strategy game that lets you prank your neighbour in various ways. In this article, we will show you how to download Neighbours From Hell 6 for free, give you some tips and tricks for playing it, and review its features, pros and cons.
-DOWNLOAD »»» https://byltly.com/2uKz7N
Neighbours From Hell 6 is the sixth installment of the popular Neighbours From Hell series, which was developed by Orion Games and released in 2009. The game is not available on any official platforms, such as Steam or GOG, but you can still download it for free from some unofficial sources. Here are the steps you need to follow:
-Find a reliable source for the game file. You can search online for websites that offer free downloads of Neighbours From Hell 6, but be careful not to click on any suspicious links or ads that might contain malware or viruses. One of the websites that we recommend is this one, which has a video tutorial on how to get the game file.
Download and install the game file. Once you have found a trustworthy source, you can download the game file, which is usually in a compressed format, such as ZIP or RAR. You will need a program like WinRAR or 7-Zip to extract the file to a folder on your computer. Then, you can run the setup.exe file and follow the instructions to install the game.
Enjoy playing Neighbours From Hell 6. After installing the game, you can launch it from your desktop or start menu and start pranking your neighbour. You can also adjust the settings, such as resolution, sound, and language, according to your preferences.
Neighbours From Hell 6 is a fun and easy game to play, but it can also be challenging if you want to achieve a high score and unlock all the achievements. Here are some tips and tricks that might help you:
-Plan your pranks carefully. The game consists of 14 episodes, each with a different setting and a number of pranks that you can perform on your neighbour. You have a limited time to complete each episode, so you need to plan your pranks ahead and execute them in the right order. You can use the eye icon to spy on your neighbour's movements and actions, and use the pause button to think about your next move.
Use the environment to your advantage. The game offers a variety of pranks and items that you can use to annoy your neighbour, such as glue, pepper, soap, scissors, etc. You can also interact with some objects in the environment, such as doors, windows, switches, faucets, etc., to create more chaos. For example, you can turn on the water while your neighbour is showering, or cut off the electricity while he is watching TV.
Watch out for the dog and other obstacles. Your neighbour is not the only one who can ruin your plans. There are also some obstacles that you need to avoid or overcome, such as his dog Chilli, who will chase you if he sees you; his girlfriend Olga, who will slap you if she catches you; or his mother Rottweiler, who will scold him if she finds out what he is doing. You can use some items or tricks to distract them or get rid of them temporarily.
-Download Neighbours From Hell 6 Origastock
-Play Neighbours Back From Hell on PC
-Neighbours Back From Hell Android Emulator
-Neighbours From Hell 6 Game Free Download
-Neighbours From Hell 6 Puzzle Game
-Neighbours From Hell 6 Holiday Locations
-Neighbours From Hell 6 Prank Your Neighbor
-Neighbours From Hell 6 TV Show Game
-Neighbours From Hell 6 HandyGames
-Neighbours From Hell 6 BlueStacks
-Download Neighbours From Hell Season 1
-Neighbours From Hell Season 1 LDPlayer
-Neighbours From Hell Season 1 PC Game
-Neighbours From Hell Season 1 Free Download
-Neighbours From Hell Season 1 Android Game
-How to Download Neighbours From Hell 6
-How to Play Neighbours Back From Hell
-How to Install Neighbours From Hell Season 1
-How to Prank Your Neighbor in Neighbours From Hell 6
-How to Win Awards in Neighbours Back From Hell
-Best Pranks in Neighbours From Hell 6
-Best Episodes in Neighbours Back From Hell
-Best Android Emulator for Neighbours From Hell Games
-Best PC Games Like Neighbours From Hell
-Best Tips and Tricks for Neighbours From Hell Games
-Download Neighbours From Hell Complete Collection
-Play All Seasons of Neighbours From Hell on PC
-Neighbours From Hell Games for Windows 10
-Neighbours From Hell Games for Mac OS
-Neighbours From Hell Games for Linux
-Download Neighbours From Hell APK File
-Download Neighbours Back From Hell MOD APK
-Download Neighbours From Hell Season 1 OBB Data
-Download Neighbours From Hell Full Version Crack
-Download Neighbours Back From Hell Patch Update
-Download Neighbours From Hell Soundtrack MP3
-Download Neighbours Back From Hell Wallpaper HD
-Download Neighbours From Hell Comics PDF
-Download Neighbours Back From Hell Cheats and Hacks
-Download Neighbours From Hell Walkthrough Guide
Neighbours From Hell 6 is a game that offers a lot of features that make it enjoyable and entertaining. Here are some of them:
-14 hilarious episodes with different settings. The game takes you to various locations where you can prank your neighbour, such as his house, his office, his hotel room, his cruise ship cabin, his ski resort chalet, etc. Each episode has its own theme and atmosphere, as well as unique pranks and items that you can use.
A variety of pranks and items to use. The game gives you a lot of options to choose from when it comes to pranking your neighbour. You can use simple items like glue or pepper, or more elaborate ones like fireworks or dynamite. You can also combine some items to create more effects or damage. For example, you can put glue on his chair and then cut his pants with scissors.
A catchy soundtrack and funny sound effects. The game has a catchy soundtrack that matches the mood of each episode. It also has funny sound effects that add more humor to the game. You can hear your neighbour's screams, groans, curses, etc., as well as his reactions to your pranks.
Neighbours From Hell 6 is a game that has its pros and cons. Here are some of them:
-| Pros | Cons |
-| --- | --- |
-| A fun and addictive gameplay. The game is easy to play but hard to master. It requires strategy, timing, creativity, and patience. It also has a replay value because you can try different pranks or aim for higher scores. | A repetitive and predictable pattern. The game follows a similar pattern in each episode. You have to spy on your neighbour's routine, find items in hidden places, set up pranks in specific spots, etc. The neighbour's behaviour is also predictable after a while. |
-| A humorous and original concept. The game has a unique concept that makes it stand out from other games. It is based on a TV show with the same name that aired in Germany in 2003-2005. It is also inspired by real-life situations that many people can relate to. | A lack of difficulty and challenge. The game is not very challenging because it does not have any penalties or consequences for failing an episode or getting caught by your neighbour. You can simply restart or retry until you succeed. The game also does not have any difficulty levels or modes that could make it more challenging. |
-| A low system requirement and easy installation. The game does not require a high-end computer or device to run smoothly. It has low graphics quality and size that make it compatible with most systems. It also has an easy installation process that does not require any additional programs or files. | A dated graphics and animation. The game has poor graphics quality and animation that make it look outdated compared to other games released in the same year or later. The characters are pixelated and stiff; the backgrounds are bland and blurry; the movements are slow and unnatural. |
Neighbours From Hell 6 is a comedy strategy game that lets you prank your neighbour in various ways. You can download it for free from some unofficial sources and enjoy playing it on your computer. The game has a fun and addictive gameplay, a humorous and original concept, and a catchy soundtrack and sound effects. However, it also has some drawbacks, such as a repetitive and predictable pattern, a lack of difficulty and challenge, and a dated graphics and animation. Overall, Neighbours From Hell 6 is a game that can make you laugh and have a good time, but it might not appeal to everyone.
-If you are interested in playing Neighbours From Hell 6, you can follow the steps we provided in this article and start pranking your neighbour today. You can also check out some tips and tricks we shared to help you improve your performance and score. And if you want to learn more about the features, pros and cons of Neighbours From Hell 6, you can read our review and see if it suits your taste.
-We hope you enjoyed reading this article and found it useful. If you did, please share it with your friends and family who might also like to play Neighbours From Hell 6. And if you have any questions or feedback, please leave them in the comments section below. We would love to hear from you.
-Thank you for reading and happy pranking!
-Q: What is the difference between Neighbours From Hell 6 and other Neighbours From Hell games?
-A: Neighbours From Hell 6 is the sixth installment of the series, which was released in 2009. It has 14 episodes with different settings, such as a cruise ship, a ski resort, a casino, etc. It also has some new pranks and items that were not available in previous games.
Q: Is Neighbours From Hell 6 safe to download?
-A: Neighbours From Hell 6 is not available on any official platforms, such as Steam or GOG, but you can still download it for free from some unofficial sources. However, you need to be careful not to click on any suspicious links or ads that might contain malware or viruses. You should also scan the game file with an antivirus program before installing it.
Q: How long does it take to finish Neighbours From Hell 6?
-A: It depends on your skill level and how much time you spend on each episode. Each episode has a time limit that ranges from 5 to 15 minutes. If you complete all the episodes with 100% score, it might take you around 3 to 4 hours to finish the game.
Q: Can I play Neighbours From Hell 6 online or with friends?
-A: No, Neighbours From Hell 6 is a single-player game that does not have any online or multiplayer features. You can only play it offline on your computer.
Q: Where can I find more information about Neighbours From Hell 6?
-A: You can find more information about Neighbours From Hell 6 on some websites that offer free downloads of the game, such as this one. You can also watch some videos on YouTube that show the gameplay and walkthrough of the game.
If you are a flight simulator enthusiast, you probably know how important it is to have the most accurate and up-to-date navigation data for your flights. Navigation data includes information such as waypoints, airways, navaids, procedures, and more. Without it, you might end up flying to the wrong destination, missing an approach, or violating airspace restrictions.
-DOWNLOAD » https://imgfil.com/2uxXAQ
That's why you need to update your flight simulator with the latest AIRAC cycle. AIRAC stands for Aeronautical Information Regulation And Control, and it is a system that ensures that all aeronautical information is published and updated at regular intervals. Every 28 days, a new AIRAC cycle is released with the latest changes and corrections to the navigation data.
- -But how do you update your flight simulator with the new AIRAC cycle? That's where AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol comes in. This is a package that contains all the navigation data files for three popular flight simulators: Microsoft Flight Simulator X (FSX), Microsoft Flight Simulator 2004 (FS9), and X-Plane. It also includes a tool called Demol that allows you to easily install the files into your simulator.
- -In this article, we will show you how to use AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol to update your flight simulator with the latest navigation data. Follow these simple steps and enjoy more realistic and accurate flights.
- -The first step is to download the package from the link below. The package is a ZIP file that contains all the navigation data files for FSX, FS9, and X-Plane, as well as the Demol tool. The file size is about 1.2 GB, so make sure you have enough space on your hard drive and a stable internet connection.
- - -Download AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol
- -The next step is to extract the ZIP file to a folder on your computer. You can use any software that can handle ZIP files, such as WinZip, WinRAR, or 7-Zip. To extract the file, right-click on it and select "Extract All" or "Extract Here". Choose a destination folder where you want to save the extracted files.
- -The third step is to run Demol.exe from the extracted folder. This is a tool that will help you install the navigation data files into your flight simulator. When you run Demol.exe, you will see a window like this:
- -As you can see, Demol has four tabs: FSX/P3D, FS9/FS2004, X-Plane 10/11/12/13/14/15/16/17/18/19/20/21/22/23/24/25/26/27/28/29/30 - -
The fourth step is to select your flight simulator and install the navigation data files. To do this, click on the tab that corresponds to your simulator: FSX/P3D, FS9/FS2004, or X-Plane. You will see a window like this:
- -In this example, we will use FSX as our simulator. You can see that Demol has detected the location of our FSX installation folder. If Demol does not find your simulator folder automatically, you can browse for it manually by clicking on the "..." button.
- -Next, you need to select the navigation data files that you want to install. You can choose between two options: "Install all files" or "Install selected files". If you choose "Install all files", Demol will install all the navigation data files for your simulator. This includes the files for the default aircraft and scenery, as well as for any add-ons that you have installed. This option is recommended if you want to have the most complete and updated navigation data for your simulator.
- -If you choose "Install selected files", Demol will let you choose which files you want to install. This option is useful if you only want to update certain parts of your simulator, such as specific add-ons or regions. To select the files, click on the "Select Files" button. You will see a window like this:
- -Here, you can browse through the folders and subfolders that contain the navigation data files. You can check or uncheck the boxes next to each file to select or deselect it. You can also use the buttons at the bottom to select or deselect all files in a folder or subfolder.
- -Once you have selected the files that you want to install, click on the "OK" button to return to the main window. You will see a summary of the files that you have selected at the bottom of the window.
- -Finally, click on the "Install" button to start installing the navigation data files into your simulator. Demol will show you a progress bar and a log of the installation process. Depending on the number and size of the files that you have selected, this may take some time.
- -The fifth and final step is to enjoy your updated flight simulator with the latest AIRAC cycle. Once Demol has finished installing the files, it will show you a message saying "Installation completed successfully". You can close Demol and launch your flight simulator.
- -You will notice that your flight simulator now has more accurate and up-to-date navigation data for your flights. You can check this by looking at the map, GPS, FMC, or any other navigation device in your aircraft. You can also use online tools such as Navigraph or AIRAC to compare and verify the navigation data.
- -Congratulations! You have successfully updated your flight simulator with AIRAC Cycle 1210 (complete) [FSX, FS9, X-Plane] Demol. Now you can enjoy more realistic and accurate flights with your favorite simulator.
-d5da3c52bf
Download Zip » https://imgfil.com/2uxZJD
Download Zip ===> https://imgfil.com/2uy0Bd
DOWNLOAD ——— https://imgfil.com/2uxXNG
This document was downloaded from Lit2Go, a free online collection of stories and poems in Mp3 (audiobook) format published by the Florida Center for Instructional Technology. For more information, including classroom activities, readability data, and original sources, please visit -tragedy-of-macbeth/5576/act-5-scene-5/.
-Download Zip 🗹 https://imgfil.com/2uxXa0
Warner Bros. Pictures is hosting its own streaming application for the nominated films below. You can stream directly from the web or you can download the application to your desktop, phone or other devices prior to streaming. To access please click the graphic below and either enter your Awards PIN in the "Access Code" field or once the app is downloaded, enter your Awards PIN in the "Login Code" field.
-The Screen Actors Guild Awards support efforts to eliminate the theft of copyrighted materials, as content theft threatens the economic livelihood of all entertainment industry professionals, especially working actors who depend on residuals to make a living. Screener DVDs, digital downloads, and streaming offers are provided to members for personal viewing in connection with awards consideration only and must not be uploaded to the internet, publicly exhibited, distributed, rented, loaned, sold, reproduced or given to anyone. The unauthorized use of copyrighted materials violates state and/or federal laws and may result in civil and/or criminal liability. It may also constitute grounds for discipline, including expulsion from SAG-AFTRA.
-aaccfb2cb3
If you are a fan of car racing and stunt games, you might have heard of Car Stunt 3D Extreme City. This is a thrilling and addictive game that lets you perform amazing stunts in a fantasy city. You can drive various cars, customize them, and challenge yourself with different tracks and missions. But what if you want to enjoy the game without any limitations or interruptions? That's where Car Stunt 3D Extreme City Mod Apk comes in. In this article, we will review this modded version of the game, tell you how to download and install it, and explain its benefits.
-Car Stunt 3D Extreme City is a simulation game developed by Timuz Games. It is available for Android devices on Google Play Store. The game has over 10 million downloads and a rating of 4.0 out of 5 stars. The game is designed to test your driving skills and creativity in a realistic and immersive environment. You can choose from a variety of cars, each with its own features and specifications. You can also customize your car with different colors, wheels, stickers, and more. The game offers multiple tracks, each with its own obstacles, ramps, loops, bridges, and tunnels. You can perform stunning stunts like flips, jumps, drifts, and spins. The game also has different modes, such as free mode, time mode, challenge mode, and multiplayer mode. You can compete with other players online or offline, and earn rewards for completing missions.
-Download ★ https://jinyurl.com/2uNK1e
One of the best features of Car Stunt 3D Extreme City is its realistic physics and graphics. The game uses advanced physics engine to simulate the movement and behavior of the cars. You can feel the impact of gravity, friction, inertia, and momentum as you drive your car. The game also has stunning graphics that create a lifelike cityscape. You can see the details of the buildings, roads, trees, sky, and water. The game also has dynamic lighting and shadows that enhance the visual effects.
-Another feature of Car Stunt 3D Extreme City is its multiple cars and tracks. The game offers more than 20 cars to choose from, including sports cars, muscle cars, monster trucks, and more. Each car has its own speed, acceleration, handling, braking, and durability. You can also customize your car with different colors, wheels, stickers, and more. The game also has more than 100 tracks to explore, each with its own obstacles, ramps, loops, bridges, and tunnels. You can find different themes for the tracks, such as city, desert, snow, forest, and more.
-A third feature of Car Stunt 3D Extreme City is its challenging stunts and missions. The game allows you to perform amazing stunts like flips, jumps, drifts, and spins. You can also use nitro boosters to increase your speed and power. The game also has different modes to test your skills and creativity. You can play in free mode to explore the tracks at your own pace. You can play in time mode to race against the clock and beat your best time. You can play in challenge mode to complete various tasks and objectives. You can also play in multiplayer mode to compete with other players online or offline. The game also has a leaderboard and achievements system to track your progress and performance.
-A fourth feature of Car Stunt 3D Extreme City is its customizable controls and settings. The game allows you to choose from different control options, such as tilt, steering wheel, buttons, or joystick. You can also adjust the sensitivity and calibration of the controls to suit your preference. The game also has different settings to optimize your gaming experience. You can change the graphics quality, sound effects, music, and language of the game. You can also enable or disable the vibration, camera shake, and nitro effects.
-To download and install Car Stunt 3D Extreme City Mod Apk, you need to have an Android device that meets the following requirements: - Android version: 4.4 or higher - RAM: 2 GB or more - Storage: 100 MB or more - Internet connection: required for multiplayer mode The game is compatible with most Android devices, including smartphones and tablets. However, some devices may not support the game or run it smoothly due to hardware limitations.
-To download and install Car Stunt 3D Extreme City Mod Apk, you need to follow these steps: - Step 1: Go to a trusted website that provides the modded version of the game. You can search for "Car Stunt 3D Extreme City Mod Apk" on Google or any other search engine. - Step 2: Download the modded apk file from the website. Make sure you download the latest version of the mod that matches your device's specifications. - Step 3: Before installing the modded apk file, you need to enable the "Unknown Sources" option on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. - Step 4: Locate the downloaded modded apk file on your device's file manager and tap on it to start the installation process. Follow the instructions on the screen to complete the installation. - Step 5: Once the installation is done, you can launch the game from your app drawer or home screen. Enjoy playing Car Stunt 3D Extreme City Mod Apk with unlimited money and gems, all cars and tracks unlocked, no ads and pop-ups, and enhanced performance and stability.
-extreme car stunts 3d mod apk download
-car stunt 3d extreme city hack apk
-car stunt 3d extreme city mod apk unlimited money
-extreme car stunts 3d mod apk android 1
-car stunt 3d extreme city game mod apk
-extreme car stunts 3d mod apk latest version
-car stunt 3d extreme city mod apk revdl
-extreme car stunts 3d mod apk free download
-car stunt 3d extreme city mod apk offline
-extreme car stunts 3d mod apk unlocked all cars
-car stunt 3d extreme city mod apk rexdl
-extreme car stunts 3d mod apk happymod
-car stunt 3d extreme city mod apk unlimited coins
-extreme car stunts 3d mod apk no ads
-car stunt 3d extreme city mod apk online
-extreme car stunts 3d mod apk pure
-car stunt 3d extreme city mod apk obb
-extreme car stunts 3d mod apk old version
-car stunt 3d extreme city mod apk for pc
-extreme car stunts 3d mod apk vip
-car stunt 3d extreme city mod apk new version
-extreme car stunts 3d mod apk full version
-car stunt 3d extreme city mod apk android oyun club
-extreme car stunts 3d mod apk unlimited everything
-car stunt 3d extreme city mod apk uptodown
-extreme car stunts 3d mod apk all levels unlocked
-car stunt 3d extreme city mod apk apkpure
-extreme car stunts 3d mod apk unlimited gems
-car stunt 3d extreme city mod apk an1
-extreme car stunts 3d mod apk mega mod
-car stunt 3d extreme city mod apk apkmody
-extreme car stunts 3d mod apk unlimited nitro
-car stunt 3d extreme city mod apk apkmirror
-extreme car stunts 3d mod apk god mode
-car stunt 3d extreme city mod apk highly compressed
-extreme car stunts 3d mod apk premium
-car stunt 3d extreme city mod apk hack download
-extreme car stunts 3d mod apk pro
-car stunt 3d extreme city mod apk latest update
-extreme car stunts 3d mod apk original
-car stunt 3d extreme city mod apk unlimited keys
-extreme car stunts 3d mod apk cracked
-car stunt 3d extreme city mod apk unlimited lives
-extreme car stunts 3d mod apk cheat codes
-car stunt 3d extreme city mod apk unlimited gold
-extreme car stunts 3d mod apk easy download
-car stunt 3d extreme city mod apk ios
-extreme car stunts 3d mod apk no root
-car stunt 3d extreme city mod apk play store
To run Car Stunt 3D Extreme City Mod Apk, you need to grant some permissions to the game. These permissions include: - Access to device storage: to read and write game data - Access to device location: to provide location-based services - Access to device camera: to take screenshots and record videos - Access to device microphone: to enable voice chat in multiplayer mode These permissions are necessary for the game to function properly and provide you with a better gaming experience. However, you should be careful about downloading and installing modded apk files from unknown sources. Some modded apk files may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should always download modded apk files from trusted websites that have positive reviews and ratings from other users. You should also scan the modded apk file with an antivirus software before installing it on your device.
-One of the benefits of Car Stunt 3D Extreme City Mod Apk is that it gives you unlimited money and gems in the game. Money and gems are the main currencies in the game that you can use to buy new cars, upgrade them, customize them, and unlock new tracks. However, earning money and gems in the game can be time-consuming and tedious. You have to complete missions, win races, watch ads, or spend real money to get them. With Car Stunt 3D Extreme City Mod Apk, you don't have to worry about that anymore. You can get unlimited money and gems for free without any effort. You can use them to buy anything you want in the game without any restrictions.
-Another benefit of Car Stunt 3D Extreme City Mod Apk is that it unlocks all cars and tracks in the game for you. Cars and tracks are the main elements of the game that determine your gameplay experience. However, not all cars and tracks are available for you at the beginning of the game. You have to unlock them by earning money and gems, completing missions, or reaching certain levels. Some cars and tracks are also exclusive to premium users who pay real money to access them. With Car Stunt 3D Extreme City Mod Apk, you don't have to do any of that. You can access all cars and tracks in the game from the start. You can enjoy driving any car you like and exploring any track you want without any limitations.
-A third benefit of Car Stunt 3D Extreme City Mod Apk is that it removes all ads and pop-ups from the game. Ads and pop-ups are annoying and distracting features that interrupt your gameplay and ruin your immersion. They also consume your data and battery life. The game has a lot of ads and pop-ups that appear before, during, and after your gameplay. You have to watch them to earn money and gems, unlock cars and tracks, or access certain features. You can also skip them by paying real money or disabling your internet connection. With Car Stunt 3D Extreme City Mod Apk, you don't have to deal with any of that. You can play the game without any ads and pop-ups bothering you. You can enjoy a smooth and uninterrupted gameplay experience.
-A fourth benefit of Car Stunt 3D Extreme City Mod Apk is that it enhances the performance and stability of the game. The game is a high-quality simulation game that requires a lot of resources and processing power to run smoothly. However, some devices may not be able to handle the game well due to hardware limitations or compatibility issues. The game may lag, crash, freeze, or glitch on some devices. The game may also have some bugs or errors that affect the gameplay quality. With Car Stunt 3D Extreme City Mod Apk, you don't have to worry about any of that. The modded version of the game optimizes the game for your device and fixes any bugs or errors that may occur. You can play the game with high speed, accuracy, and reliability.
-Car Stunt 3D Extreme City is a fun and exciting game that lets you perform amazing stunts in a realistic city environment. You can drive various cars, customize them, and challenge yourself with different tracks and modes. However, if you want to enjoy the game without any limitations or interruptions, you should try Car Stunt 3D Extreme City Mod Apk. This is a modded version of the game that gives you unlimited money and gems, all cars and tracks unlocked, no ads and pop-ups, and enhanced performance and stability. You can download and install Car Stunt 3D Extreme City Mod Apk from a trusted website and follow the steps we provided in this article. You can then enjoy playing Car Stunt 3D Extreme City Mod Apk with all its benefits.
-Here are some frequently asked questions about Car Stunt 3D Extreme City Mod Apk:
-If you are looking for a fun and addictive puzzle game to play on your iPhone or iPad, you might want to check out Candy Crush Saga. This game has been around for almost a decade, and it is still one of the most popular mobile games in the world. In this article, we will tell you what Candy Crush Saga is, how to download and play it on your iOS device, what features it offers, and some tips and tricks to help you crush more candies. We will also suggest some alternatives to Candy Crush Saga in case you want to try something different.
-Candy Crush Saga is a match-three puzzle game developed by King, a leading company in casual gaming. The game was released in 2012 for Facebook, and later for iOS, Android, Windows Phone, and Windows 10. The game has over a trillion levels, each with a different objective and layout. The basic gameplay involves swapping adjacent candies to make matches of three or more of the same color, which will clear them from the board and make way for new ones. Matching more than three candies will create special candies that have various effects, such as clearing a whole row or column, or exploding all candies of a certain color. The game also has different game modes, such as target score, clear the jelly, collect the ingredients, and order mode.
-DOWNLOAD ::: https://jinyurl.com/2uNUtG
Candy Crush Saga is popular because it is simple to play but challenging to master. It appeals to a wide range of players, from casual gamers who want to kill some time, to hardcore gamers who want to compete with their friends and other players around the world. The game also has a colorful and cute design, with catchy music and sound effects. The game is constantly updated with new levels and features, keeping the players engaged and entertained.
-Downloading and playing Candy Crush Saga on your iPhone or iPad is easy and free. Here are the steps you need to follow:
-You can also connect your game to your Facebook account to sync your progress, get more lives, and play with your friends. To do this, tap on the Connect button on the main screen and follow the instructions. You can also invite your friends to play Candy Crush Saga and send or receive lives and boosters from them.
-Candy Crush Saga is not just a simple match-three game. It has many features that make it more fun and exciting. Here are some of them:
-Candy Crush Saga has over a trillion levels, and new ones are added every week. You will never run out of challenges and surprises. The game also has different episodes, each with a unique theme and story. You will meet various characters along the way, such as Tiffi, Mr. Toffee, Odus the Owl, and many more. You will also encounter different types of candies, such as striped, wrapped, color bomb, jelly fish, coconut wheel, and more. Each candy has a different effect when matched or activated.
-Candy Crush Saga rewards you for playing well and being loyal. You can earn stars by completing levels with high scores. You can also collect sugar drops by matching certain candies. These stars and sugar drops can be used to unlock special features and boosters. You can also spin the Daily Booster Wheel every day to get a free booster. You can also participate in events and quests to win more prizes and bonuses. Some of the events and quests are Daily Quests, Sugar Track, Build-a-Bot, Fantastic Five, Sweet Streak, and more.
-Candy Crush Saga has different game modes that test your skills and strategy. Some of the game modes are:
-candy crush saga game download for iphone
-how to install candy crush saga on ios
-candy crush saga free download ios app store
-candy crush saga latest version download for ios
-candy crush saga cheats and tips for ios
-candy crush saga ios download without wifi
-candy crush saga offline mode download for ios
-candy crush saga hack download for ios
-candy crush saga mod apk download for ios
-candy crush saga unlimited lives download for ios
-candy crush saga update download for ios
-candy crush saga old version download for ios
-candy crush saga download size for ios
-candy crush saga download link for ios
-candy crush saga download error on ios
-candy crush saga download from itunes
-candy crush saga compatible with ios 14
-candy crush saga support for ios 15
-candy crush saga requirements for ios devices
-candy crush saga ratings and reviews for ios
-candy crush saga features and benefits for ios users
-candy crush saga alternatives and competitors for ios
-candy crush saga best levels and episodes for ios
-candy crush saga rewards and prizes for ios players
-candy crush saga events and challenges for ios gamers
-candy crush saga friends and community for ios fans
-candy crush soda saga download for ios
-candy crush jelly saga download for ios
-candy crush friends saga download for ios
-candy crush dreamworld saga download for ios
-candy crush all stars download for ios
-candy crush blast download for ios
-candy crush farm heroes download for ios
-candy crush pet rescue download for ios
-candy crush bubble witch download for ios
-how to play candy crush saga on ipad
-how to play candy crush saga on apple watch
-how to play candy crush saga on apple tv
-how to play candy crush saga on macbook
-how to play candy crush saga on imac
-how to sync candy crush saga across ios devices
-how to backup and restore candy crush saga on ios devices
-how to transfer candy crush saga from android to ios devices or vice versa
-how to connect candy crush saga with facebook on ios devices
-how to contact candy crush saga customer service on ios devices
-how to delete or uninstall candy crush saga from ios devices
-how to fix or troubleshoot candy crush saga issues on ios devices
Each game mode has its own challenges and strategies. You need to adapt your moves according to the objective and the layout of the board.
-Candy Crush Saga is a game that you can enjoy alone or with your friends. You can play offline or online, depending on your preference. You can also connect your game to your Facebook account to see how your friends are doing, compare scores, send or receive lives and boosters, and compete in leaderboards and tournaments. You can also join a team with other players and work together to achieve common goals and rewards.
-Candy Crush Saga is a game that requires skill, strategy, and luck. Sometimes, you may get stuck on a level or run out of lives or boosters. Don't worry, we have some tips and tricks that can help you overcome these challenges and have more fun playing the game. Here are some of them:
-One of the keys to success in Candy Crush Saga is to make good use of the special candies and their combinations. Some of the best combos are:
-Some of the worst combos are:
-Special candies are very powerful, but they are also limited. You should use them wisely and strategically. Here are some tips on how to use them:
-Candy Crush Saga is a game that requires you to think ahead and plan your moves carefully. You should not just match candies randomly or impulsively. Here are some tips on how to plan your moves:
-Boosters are very helpful in Candy Crush Saga, but they are also scarce and expensive. You should not use them on easy levels or when you are not sure if they will help you. Here are some tips on how to save your boosters:
-Candy Crush Saga is more fun when you play with your friends. You can connect your game to your Facebook account to enjoy some extra benefits. Here are some of them:
-Candy Crush Saga is a great game, but it is not the only one of its kind. There are many other games that offer similar or different gameplay and features. If you want to try something new, here are some alternatives to Candy Crush Saga that you might like:
-Zookeeper Battle is a match-three puzzle game that pits you against other players in real-time battles. You need to match animal tiles to attack your opponent and defend yourself. You can also use items and skills to gain an edge in the battle. The game has cute graphics and sound effects, and a simple but addictive gameplay. You can play with your friends or with random players from around the world.
-Bejeweled Blitz is a fast-paced match-three puzzle game that challenges you to score as high as possible in 60 seconds. You need to match gems of the same color to clear them from the board and create cascades and combos. You can also use special gems and boosters to increase your score and unleash powerful effects. The game has stunning graphics and sound effects, and a competitive gameplay. You can play with your friends or with millions of players from around the world.
-Two Dots is a minimalist match-three puzzle game that requires you to connect dots of the same color to clear them from the board. You need to complete different objectives in each level, such as breaking ice, dropping anchors, or collecting fireflies. The game has a beautiful design and music, and a relaxing but challenging gameplay. You can play alone or with your friends in co-op mode.
-Futurama: Game of Drones is a match-four puzzle game that features the characters and humor of the popular animated series Futurama. You need to match delivery drones of the same color to clear them from the board and deliver packages. You can also use special drones and power-ups to create explosions and combos. The game has a hilarious story and dialogue, and a fun and addictive gameplay. You can play with your friends or with other players from around the world.
-Candy Crush Saga is a game that you can download and play on your iOS device for free. It is a match-three puzzle game that has over a trillion levels, each with a different objective and game mode. It also has many features that make it more fun and exciting, such as special candies, boosters, events, quests, and more. You can also play with your friends or with other players from around the world. Candy Crush Saga is a game that will keep you craving more.
-If you are ready to join the sweet adventure, download Candy Crush Saga today and start matching candies. You will not regret it.
-Here are some frequently asked questions about Candy Crush Saga:
-You can get more lives in Candy Crush Saga by doing one of the following:
-You can sync your progress across devices by connecting your game to your Facebook account. This will allow you to access your game data on any device or platform that supports Candy Crush Saga.
-You can clear jelly and other obstacles by matching candies on them or near them. Some obstacles may require more than one match to clear, such as double jelly, licorice locks, chocolate, or cake bombs. You can also use special candies or boosters to clear them faster or easier.
-You can get free gold bars in Candy Crush Saga by doing one of the following:
-You can contact the support team for Candy Crush Saga by doing one of the following:
-You can modify this app directly by editing index.html in the Files and versions tab.
-Also don't forget to check the Spaces documentation.
-Si eres un fan de los juegos de puzzle, probablemente hayas oído hablar de Candy Crush Saga, uno de los juegos más populares y adictivos de todos los tiempos. ¿Pero sabías que también puedes jugar a este juego en tu PC Windows 7? En este artículo, te mostraremos cómo descargar e instalar Candy Crush Saga para PC Windows 7 usando dos métodos diferentes: desde la tienda de Microsoft y usando un emulador. ¡Sigue leyendo para saber más!
-Candy Crush Saga es un juego de puzzle desarrollado por King, una empresa líder en la industria de los juegos casuales. El juego fue lanzado en 2012 y desde entonces se ha convertido en un fenómeno mundial, con más de mil millones de descargas y millones de jugadores activos cada día. El juego es simple pero desafiante: tienes que cambiar y combinar dulces del mismo color para limpiar el tablero y completar los niveles. Hay miles de niveles para jugar, cada uno con diferentes objetivos y obstáculos. También puedes usar potenciadores y dulces especiales para ayudarte. El juego es gratis, pero también puedes comprar movimientos adicionales, vidas y otros artículos con dinero real.
-Download File ○ https://bltlly.com/2v6K90
Mientras que Candy Crush Saga está diseñado principalmente para dispositivos móviles, hay muchas razones por las que es posible que desee jugar en su PC Windows 7. Aquí están algunos de ellos:
-Entonces, ¿cómo se descarga Candy Crush Saga para PC Windows 7? Hay dos maneras de hacerlo: desde la tienda de Microsoft o usando un emulador. Veamos cómo funciona cada método.
-Abra su navegador web y vaya a [el sitio web de Microsoft Store]( 1 ). También puede acceder a la tienda desde su menú de inicio o barra de tareas.
-En la barra de búsqueda en la esquina superior derecha, escriba "Candy Crush Saga" y pulse Enter. Verá una lista de resultados relacionados con su consulta. Haga clic en el que dice "Candy Crush Saga" por king.com.
-En la página del juego, verá un botón que dice "Obtener". Haga clic en él para comenzar a descargar el juego. Es posible que tengas que iniciar sesión con tu cuenta de Microsoft si aún no lo has hecho. El juego es gratuito, pero puede ofrecer compras en la aplicación.
-Una vez completada la descarga, puedes iniciar el juego desde tu menú de inicio o barra de tareas. Verás una pantalla de bienvenida que te pide que conectes tu cuenta de Facebook o que juegues como invitado. También puede elegir su idioma preferido en el menú de configuración. Ahora está listo para jugar Candy Crush Saga en su PC Windows 7!
-Un emulador es un software que le permite ejecutar aplicaciones y juegos para Android en su PC. Hay muchos emuladores disponibles, pero uno de los más populares y fiables es BlueStacks. BlueStacks es gratuito y fácil de usar, y tiene una gran biblioteca de aplicaciones y juegos que puedes descargar y jugar. Estos son los pasos para descargar Candy Crush Saga usando el emulador de BlueStacks:
-Vaya a [el sitio web de BlueStacks] y haga clic en el botón "Descargar BlueStacks". Esto comenzará a descargar el archivo de instalación en su PC. Una vez finalizada la descarga, ejecute el archivo y siga las instrucciones para instalar BlueStacks en su PC. Es posible que necesite conceder algunos permisos y reiniciar su PC durante el proceso.
- -Una vez que haya iniciado sesión, verá la pantalla de inicio de BlueStacks, que parece una tableta Android. En la esquina superior derecha, verá un icono de búsqueda. Haga clic en él y escriba "Candy Crush Saga" en la barra de búsqueda. Verá una lista de resultados relacionados con su consulta. Haga clic en el que dice "Candy Crush Saga" por King.
-En la página del juego, verá un botón que dice "Instalar". Haga clic en él para comenzar a descargar e instalar el juego en BlueStacks. El juego es gratuito, pero puede ofrecer compras en la aplicación. Una vez completada la instalación, puede iniciar el juego desde la pantalla de inicio o el cajón de aplicaciones de BlueStacks. Verás una pantalla de bienvenida que te pide que conectes tu cuenta de Facebook o que juegues como invitado. También puede elegir su idioma preferido en el menú de configuración. Ahora está listo para jugar Candy Crush Saga en su PC Windows 7!
-En este artículo, le hemos mostrado cómo descargar e instalar Candy Crush Saga para PC Windows 7 usando dos métodos diferentes: desde la Tienda de Microsoft y usando un emulador. Ambos métodos son fáciles y seguros, y te permiten disfrutar de este divertido y adictivo juego de puzzle en tu PC Windows 7. También puedes sincronizar tu progreso a través de múltiples dispositivos y jugar sin conexión sin interrupciones.
-Así que, ¿qué estás esperando? Descargar Candy Crush Saga para PC Windows 7 hoy y empezar a emparejar caramelos y niveles de limpieza! Usted tendrá una explosión jugando a este juego, si usted es un principiante o un experto. ¡Y no olvides compartir tu experiencia con nosotros en los comentarios de abajo!
-64aa2da5cf
{expected_text}
This is bold text.
"
-    mock_response = mocker.Mock()
-    mock_response.status_code = 200
-    mock_response.text = html
-    mocker.patch("requests.Session.get", return_value=mock_response)
-
-    # Call the function with a URL
-    result = scrape_text("https://www.example.com")
-
-    # Check that the function properly handles HTML tags
-    assert result == "This is bold text."
diff --git a/spaces/JoeyFoursheds/ClonerHug/README.md b/spaces/JoeyFoursheds/ClonerHug/README.md
deleted file mode 100644
index 46a5dfc4c83840c80f91e918da575995cda99d30..0000000000000000000000000000000000000000
--- a/spaces/JoeyFoursheds/ClonerHug/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ClonerHug
-emoji: 🐨
-colorFrom: gray
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.29.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Snake/con_snake_SVM.py b/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Snake/con_snake_SVM.py
deleted file mode 100644
index 896912e1ba8bfbbf46add0fcbb893653533de791..0000000000000000000000000000000000000000
--- a/spaces/JohnCalimoso/animalbreedidentificationversion1.5/Control/Snake/con_snake_SVM.py
+++ /dev/null
@@ -1,37 +0,0 @@
-import cv2
-import numpy as np
-from PIL import Image
-import pickle
-import tensorflow as tf
-import io
-
-class snakeSVM:
-    def __init__(self,url) -> None:
-        self.image = url
-
-    def predict_image(self):
-        # Load the model
-        load_extractor = tf.keras.models.load_model("././Model/Snake/resnetxSVM/resnet_EXTRACTOR.h5")
-
-        modelpath = "././Model/Snake/resnetxSVM/dataSaved.pkl"
-
-        with open(modelpath, 'rb') as file:
-            saved_data = pickle.load(file)
-            animal_breed = saved_data['class_name']
-            model = saved_data['svm_model']
-
-        im = Image.open(self.image)
-        img = im.convert("RGB")
-        img= np.asarray(img)
-        image_resized= cv2.resize(img, (224,224))
-        features = load_extractor.predict(np.expand_dims(image_resized, axis=0))
-
-        reshaped_features = features.reshape(features.shape[0],-1)
-        predicted_class = model.predict(reshaped_features)
-        pred_prob = model.predict_proba(reshaped_features)
-        prediction_probability = pred_prob[0][predicted_class[0]]
-        predicted_class
-
-        output_class= animal_breed[predicted_class[0]]
-
-        return [output_class, prediction_probability]
diff --git a/spaces/Joom/Front-end-code-generation-from-images/compiler/Node.py b/spaces/Joom/Front-end-code-generation-from-images/compiler/Node.py
deleted file mode 100644
index 4dac248505d448edda8aea5b708c3fa7569ceb29..0000000000000000000000000000000000000000
--- a/spaces/Joom/Front-end-code-generation-from-images/compiler/Node.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import print_function
-__author__ = 'Taneem Jan, taneemishere.github.io'
-
-
-class Node:
-    def __init__(self, key, parent_node, content_holder):
-        self.key = key
-        self.parent = parent_node
-        self.children = []
-        self.content_holder = content_holder
-
-    def add_child(self, child):
-        self.children.append(child)
-
-    def show(self):
-        print(self.key)
-        for child in self.children:
-            child.show()
-
-    def render(self, mapping, rendering_function=None):
-        content = ""
-        for child in self.children:
-            content += child.render(mapping, rendering_function)
-
-        value = mapping[self.key]
-        if rendering_function is not None:
-            value = rendering_function(self.key, value)
-
-        if len(self.children) != 0:
-            value = value.replace(self.content_holder, content)
-
-        return value
diff --git a/spaces/Junity/Genshin-World-Model/README.md b/spaces/Junity/Genshin-World-Model/README.md
deleted file mode 100644
index 0201d907241e380d62dd4cd823bbedb87bd896ca..0000000000000000000000000000000000000000
--- a/spaces/Junity/Genshin-World-Model/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Genshin World Model
-emoji: 📈
-colorFrom: indigo
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
deleted file mode 100644
index f9664fb1f89ef068e923211179e1c7e1ce7fdbd2..0000000000000000000000000000000000000000
--- a/spaces/Kangarroar/ApplioRVC-Inference/infer/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import numpy as np
-import pyworld
-
-from infer.lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
-
-
-class HarvestF0Predictor(F0Predictor):
-    def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
-        self.hop_length = hop_length
-        self.f0_min = f0_min
-        self.f0_max = f0_max
-        self.sampling_rate = sampling_rate
-
-    def interpolate_f0(self, f0):
-        """
-        对F0进行插值处理
-        """
-
-        data = np.reshape(f0, (f0.size, 1))
-
-        vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
-        vuv_vector[data > 0.0] = 1.0
-        vuv_vector[data <= 0.0] = 0.0
-
-        ip_data = data
-
-        frame_number = data.size
-        last_value = 0.0
-        for i in range(frame_number):
-            if data[i] <= 0.0:
-                j = i + 1
-                for j in range(i + 1, frame_number):
-                    if data[j] > 0.0:
-                        break
-                if j < frame_number - 1:
-                    if last_value > 0.0:
-                        step = (data[j] - data[i - 1]) / float(j - i)
-                        for k in range(i, j):
-                            ip_data[k] = data[i - 1] + step * (k - i + 1)
-                    else:
-                        for k in range(i, j):
-                            ip_data[k] = data[j]
-                else:
-                    for k in range(i, frame_number):
-                        ip_data[k] = last_value
-            else:
-                ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝
-                last_value = data[i]
-
-        return ip_data[:, 0], vuv_vector[:, 0]
-
-    def resize_f0(self, x, target_len):
-        source = np.array(x)
-        source[source < 0.001] = np.nan
-        target = np.interp(
-            np.arange(0, len(source) * target_len, len(source)) / target_len,
-            np.arange(0, len(source)),
-            source,
-        )
-        res = np.nan_to_num(target)
-        return res
-
-    def compute_f0(self, wav, p_len=None):
-        if p_len is None:
-            p_len = wav.shape[0] // self.hop_length
-        f0, t = pyworld.harvest(
-            wav.astype(np.double),
-            fs=self.hop_length,
-            f0_ceil=self.f0_max,
-            f0_floor=self.f0_min,
-            frame_period=1000 * self.hop_length / self.sampling_rate,
-        )
-        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.fs)
-        return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
-
-    def compute_f0_uv(self, wav, p_len=None):
-        if p_len is None:
-            p_len = wav.shape[0] // self.hop_length
-        f0, t = pyworld.harvest(
-            wav.astype(np.double),
-            fs=self.sampling_rate,
-            f0_floor=self.f0_min,
-            f0_ceil=self.f0_max,
-            frame_period=1000 * self.hop_length / self.sampling_rate,
-        )
-        f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
-        return self.interpolate_f0(self.resize_f0(f0, p_len))
diff --git a/spaces/KaraAgroAI/CADI-AI/app.py b/spaces/KaraAgroAI/CADI-AI/app.py
deleted file mode 100644
index 5f4c8f167a0de0fab01268ae16fb00ffb22c1816..0000000000000000000000000000000000000000
--- a/spaces/KaraAgroAI/CADI-AI/app.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# import gradio as gr
-
-# gr.Interface.load("models/KaraAgroAI/CADI-AI").launch()
-
-import gradio as gr
-# import cv2
-# import requests
-# import os
-from PIL import Image
-import torch
-import ultralytics
-
-model = torch.hub.load("ultralytics/yolov5", "custom", path="model/yolov5_0.65map_exp7_best.pt",
-                       force_reload=False)
-
-model.conf = 0.20 # NMS confidence threshold
-
-# sample test images
-path = [['sample-test-images/231.jpg'], ['sample-test-images/82.jpg'], ['sample-test-images/91.jpg']]
-
-def show_preds_image(im):
-
-    results = model(im) # inference
-    return results.render()[0]
-
-inputs_image = [
-    gr.components.Image(type="filepath", label="Input Image"),
-]
-outputs_image = [
-    gr.components.Image(type="filepath", label="Output Image"),
-]
-interface_image = gr.Interface(
-    fn=show_preds_image,
-    inputs=inputs_image,
-    outputs=outputs_image,
-    title="Cashew Disease Identification with AI",
-    examples=path,
-    cache_examples=False,
-)
-
-interface_image.launch()
\ No newline at end of file
diff --git a/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/deep_sort/deep/__init__.py b/spaces/KdaiP/yolov8-deepsort-tracking/deep_sort/deep_sort/deep/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/components/outputs.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/components/outputs.py
deleted file mode 100644
index f4859c64b9e21114436e57863fedd5fd161da627..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/mkgui/base/components/outputs.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from typing import List
-
-from pydantic import BaseModel
-
-
-class ScoredLabel(BaseModel):
-    label: str
-    score: float
-
-
-class ClassificationOutput(BaseModel):
-    __root__: List[ScoredLabel]
-
-    def __iter__(self): # type: ignore
-        return iter(self.__root__)
-
-    def __getitem__(self, item): # type: ignore
-        return self.__root__[item]
-
-    def render_output_ui(self, streamlit) -> None: # type: ignore
-        import plotly.express as px
-
-        sorted_predictions = sorted(
-            [prediction.dict() for prediction in self.__root__],
-            key=lambda k: k["score"],
-        )
-
-        num_labels = len(sorted_predictions)
-        if len(sorted_predictions) > 10:
-            num_labels = streamlit.slider(
-                "Maximum labels to show: ",
-                min_value=1,
-                max_value=len(sorted_predictions),
-                value=len(sorted_predictions),
-            )
-        fig = px.bar(
-            sorted_predictions[len(sorted_predictions) - num_labels :],
-            x="score",
-            y="label",
-            orientation="h",
-        )
-        streamlit.plotly_chart(fig, use_container_width=True)
-        # fig.show()
diff --git a/spaces/Kevin676/Telephone-Interviewing_PpaddleSpeech-TTS/README.md b/spaces/Kevin676/Telephone-Interviewing_PpaddleSpeech-TTS/README.md
deleted file mode 100644
index df87f11a897d0646ad0a6e943b78adc34867482a..0000000000000000000000000000000000000000
--- a/spaces/Kevin676/Telephone-Interviewing_PpaddleSpeech-TTS/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: ChatGLM-6B-with-Voice-Cloning-Paddle
-emoji: 🔥
-colorFrom: blue
-colorTo: blue
-sdk: gradio
-sdk_version: 3.22.1
-app_file: app.py
-pinned: false
-license: mit
-duplicated_from: DeepLearning101/Telephone-Interviewing_PpaddleSpeech-TTS
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/acronym_transliterator.py b/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/acronym_transliterator.py
deleted file mode 100644
index 09c71d15a23bbd56119c046aa5ddf76b7a42851b..0000000000000000000000000000000000000000
--- a/spaces/Kimata/Sanskrit-TTS/indic_nlp_library/indicnlp/transliterate/acronym_transliterator.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (c) 2013-present, Anoop Kunchukuttan
-# All rights reserved.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-#
-
-#Program to transliterate acronyms from one Latin script to Indic languages
-#
-# @author Anoop Kunchukuttan
-#
-
-from indicnlp.transliterate.unicode_transliterate import UnicodeIndicTransliterator
-import string
-import random
-
-class LatinToIndicAcronymTransliterator(object):
-
-    LATIN_TO_DEVANAGARI_TRANSTABLE = str.maketrans({
-        'a':'ए',
-        'b':'बी',
-        'c':'सी',
-        'd':'डी',
-        'e':'ई',
-        'f':'एफ',
-        'g':'जी',
-        'h':'एच',
-        'i':'आई',
-        'j':'जे',
-        'k':'के',
-        'l':'एल',
-        'm':'एम',
-        'n':'एन',
-        'o':'ओ',
-        'p':'पी',
-        'q':'क्यू',
-        'r':'आर',
-        's':'एस',
-        't':'टी',
-        'u':'यू',
-        'v':'वी',
-        'w':'डब्ल्यू',
-        'x':'एक्स',
-        'y':'वाय',
-        'z':'जेड',
-    })
-
-    # a_unichr=ord('a')
-    # alphabet = [ chr(a_unichr+n) for n in range(26) ]
-    LATIN_ALPHABET = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
-
-    @staticmethod
-    def get_transtable():
-        return LatinToIndicAcronymTransliterator.LATIN_TO_DEVANAGARI_TRANSTABLE
-
-    @staticmethod
-    def transliterate(w,lang):
-        return UnicodeIndicTransliterator.transliterate(w.lower().translate(LatinToIndicAcronymTransliterator.LATIN_TO_DEVANAGARI_TRANSTABLE),'hi',lang)
-
-    @staticmethod
-    def generate_latin_acronyms(num_acronyms, min_len=2, max_len=6, strategy='random'):
-        """
-        generate Latin acronyms in lower case
-        """
-
-        def sample_acronym(strategy='random'):
-            if strategy=='random':
-                slen=random.randint(min_len,max_len)
-                return ''.join(random.choices(LatinToIndicAcronymTransliterator.LATIN_ALPHABET,k=slen))
-
-
-        return [ sample_acronym(strategy) for i in range(num_acronyms) ]
-
\ No newline at end of file
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/sabl_retina_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/sabl_retina_head.py
deleted file mode 100644
index 8cd1b71cc2c80035a0378180da70caddf853375d..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/dense_heads/sabl_retina_head.py
+++ /dev/null
@@ -1,706 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import List, Optional, Tuple, Union
-
-import numpy as np
-import torch
-import torch.nn as nn
-from mmcv.cnn import ConvModule
-from mmengine.config import ConfigDict
-from mmengine.structures import InstanceData
-from torch import Tensor
-
-from mmdet.registry import MODELS, TASK_UTILS
-from mmdet.utils import (ConfigType, InstanceList, MultiConfig, OptConfigType,
-                         OptInstanceList)
-from ..task_modules.samplers import PseudoSampler
-from ..utils import (filter_scores_and_topk, images_to_levels, multi_apply,
-                     unmap)
-from .base_dense_head import BaseDenseHead
-from .guided_anchor_head import GuidedAnchorHead
-
-
-@MODELS.register_module()
-class SABLRetinaHead(BaseDenseHead):
-    """Side-Aware Boundary Localization (SABL) for RetinaNet.
- - The anchor generation, assigning and sampling in SABLRetinaHead - are the same as GuidedAnchorHead for guided anchoring. - - Please refer to https://arxiv.org/abs/1912.04260 for more details. - - Args: - num_classes (int): Number of classes. - in_channels (int): Number of channels in the input feature map. - stacked_convs (int): Number of Convs for classification and - regression branches. Defaults to 4. - feat_channels (int): Number of hidden channels. Defaults to 256. - approx_anchor_generator (:obj:`ConfigType` or dict): Config dict for - approx generator. - square_anchor_generator (:obj:`ConfigDict` or dict): Config dict for - square generator. - conv_cfg (:obj:`ConfigDict` or dict, optional): Config dict for - ConvModule. Defaults to None. - norm_cfg (:obj:`ConfigDict` or dict, optional): Config dict for - Norm Layer. Defaults to None. - bbox_coder (:obj:`ConfigDict` or dict): Config dict for bbox coder. - reg_decoded_bbox (bool): If true, the regression loss would be - applied directly on decoded bounding boxes, converting both - the predicted boxes and regression targets to absolute - coordinates format. Default False. It should be ``True`` when - using ``IoULoss``, ``GIoULoss``, or ``DIoULoss`` in the bbox head. - train_cfg (:obj:`ConfigDict` or dict, optional): Training config of - SABLRetinaHead. - test_cfg (:obj:`ConfigDict` or dict, optional): Testing config of - SABLRetinaHead. - loss_cls (:obj:`ConfigDict` or dict): Config of classification loss. - loss_bbox_cls (:obj:`ConfigDict` or dict): Config of classification - loss for bbox branch. - loss_bbox_reg (:obj:`ConfigDict` or dict): Config of regression loss - for bbox branch. - init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ - dict], optional): Initialization config dict. 
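    Note on the box branch: ``side_num = ceil(num_buckets / 2)`` (7 for the
    default ``num_buckets=14``), and both ``retina_bbox_cls`` and
    ``retina_bbox_reg`` output ``side_num * 4`` channels, i.e. for each of the
    four box sides a set of ``side_num`` bucket-classification logits and the
    corresponding fine regression offsets.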
- """ - - def __init__( - self, - num_classes: int, - in_channels: int, - stacked_convs: int = 4, - feat_channels: int = 256, - approx_anchor_generator: ConfigType = dict( - type='AnchorGenerator', - octave_base_scale=4, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[8, 16, 32, 64, 128]), - square_anchor_generator: ConfigType = dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[4], - strides=[8, 16, 32, 64, 128]), - conv_cfg: OptConfigType = None, - norm_cfg: OptConfigType = None, - bbox_coder: ConfigType = dict( - type='BucketingBBoxCoder', num_buckets=14, scale_factor=3.0), - reg_decoded_bbox: bool = False, - train_cfg: OptConfigType = None, - test_cfg: OptConfigType = None, - loss_cls: ConfigType = dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox_cls: ConfigType = dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.5), - loss_bbox_reg: ConfigType = dict( - type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5), - init_cfg: MultiConfig = dict( - type='Normal', - layer='Conv2d', - std=0.01, - override=dict( - type='Normal', name='retina_cls', std=0.01, bias_prob=0.01)) - ) -> None: - super().__init__(init_cfg=init_cfg) - self.in_channels = in_channels - self.num_classes = num_classes - self.feat_channels = feat_channels - self.num_buckets = bbox_coder['num_buckets'] - self.side_num = int(np.ceil(self.num_buckets / 2)) - - assert (approx_anchor_generator['octave_base_scale'] == - square_anchor_generator['scales'][0]) - assert (approx_anchor_generator['strides'] == - square_anchor_generator['strides']) - - self.approx_anchor_generator = TASK_UTILS.build( - approx_anchor_generator) - self.square_anchor_generator = TASK_UTILS.build( - square_anchor_generator) - self.approxs_per_octave = ( - self.approx_anchor_generator.num_base_priors[0]) - - # one anchor per location - self.num_base_priors = self.square_anchor_generator.num_base_priors[0] - - self.stacked_convs = stacked_convs - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - self.reg_decoded_bbox = reg_decoded_bbox - - self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False) - if self.use_sigmoid_cls: - self.cls_out_channels = num_classes - else: - self.cls_out_channels = num_classes + 1 - - self.bbox_coder = TASK_UTILS.build(bbox_coder) - self.loss_cls = MODELS.build(loss_cls) - self.loss_bbox_cls = MODELS.build(loss_bbox_cls) - self.loss_bbox_reg = MODELS.build(loss_bbox_reg) - - self.train_cfg = train_cfg - self.test_cfg = test_cfg - - if self.train_cfg: - self.assigner = TASK_UTILS.build(self.train_cfg['assigner']) - # use PseudoSampler when sampling is False - if 'sampler' in self.train_cfg: - self.sampler = TASK_UTILS.build( - self.train_cfg['sampler'], default_args=dict(context=self)) - else: - self.sampler = PseudoSampler(context=self) - - self._init_layers() - - def _init_layers(self) -> None: - self.relu = nn.ReLU(inplace=True) - self.cls_convs = nn.ModuleList() - self.reg_convs = nn.ModuleList() - for i in range(self.stacked_convs): - chn = self.in_channels if i == 0 else self.feat_channels - self.cls_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.reg_convs.append( - ConvModule( - chn, - self.feat_channels, - 3, - stride=1, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - self.retina_cls = nn.Conv2d( - self.feat_channels, self.cls_out_channels, 3, padding=1) - self.retina_bbox_reg = nn.Conv2d( - self.feat_channels, 
self.side_num * 4, 3, padding=1) - self.retina_bbox_cls = nn.Conv2d( - self.feat_channels, self.side_num * 4, 3, padding=1) - - def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]: - cls_feat = x - reg_feat = x - for cls_conv in self.cls_convs: - cls_feat = cls_conv(cls_feat) - for reg_conv in self.reg_convs: - reg_feat = reg_conv(reg_feat) - cls_score = self.retina_cls(cls_feat) - bbox_cls_pred = self.retina_bbox_cls(reg_feat) - bbox_reg_pred = self.retina_bbox_reg(reg_feat) - bbox_pred = (bbox_cls_pred, bbox_reg_pred) - return cls_score, bbox_pred - - def forward(self, feats: List[Tensor]) -> Tuple[List[Tensor]]: - return multi_apply(self.forward_single, feats) - - def get_anchors( - self, - featmap_sizes: List[tuple], - img_metas: List[dict], - device: Union[torch.device, str] = 'cuda' - ) -> Tuple[List[List[Tensor]], List[List[Tensor]]]: - """Get squares according to feature map sizes and guided anchors. - - Args: - featmap_sizes (list[tuple]): Multi-level feature map sizes. - img_metas (list[dict]): Image meta info. - device (torch.device | str): device for returned tensors - - Returns: - tuple: square approxs of each image - """ - num_imgs = len(img_metas) - - # since feature map sizes of all images are the same, we only compute - # squares for one time - multi_level_squares = self.square_anchor_generator.grid_priors( - featmap_sizes, device=device) - squares_list = [multi_level_squares for _ in range(num_imgs)] - - return squares_list - - def get_targets(self, - approx_list: List[List[Tensor]], - inside_flag_list: List[List[Tensor]], - square_list: List[List[Tensor]], - batch_gt_instances: InstanceList, - batch_img_metas, - batch_gt_instances_ignore: OptInstanceList = None, - unmap_outputs=True) -> tuple: - """Compute bucketing targets. - - Args: - approx_list (list[list[Tensor]]): Multi level approxs of each - image. - inside_flag_list (list[list[Tensor]]): Multi level inside flags of - each image. - square_list (list[list[Tensor]]): Multi level squares of each - image. - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - batch_img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): - Batch of gt_instances_ignore. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. Defaults to True. - - Returns: - tuple: Returns a tuple containing learning targets. - - - labels_list (list[Tensor]): Labels of each level. - - label_weights_list (list[Tensor]): Label weights of each level. - - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \ - each level. - - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \ - each level. - - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \ - each level. - - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \ - each level. - - num_total_pos (int): Number of positive samples in all images. - - num_total_neg (int): Number of negative samples in all images. 
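        Note: in this refactored head the last element of the returned tuple
        is ``avg_factor`` (the number of sampled anchors summed over the batch)
        rather than separate ``num_total_pos`` / ``num_total_neg`` counts; see
        the ``return`` statement below.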
- """ - num_imgs = len(batch_img_metas) - assert len(approx_list) == len(inside_flag_list) == len( - square_list) == num_imgs - # anchor number of multi levels - num_level_squares = [squares.size(0) for squares in square_list[0]] - # concat all level anchors and flags to a single tensor - inside_flag_flat_list = [] - approx_flat_list = [] - square_flat_list = [] - for i in range(num_imgs): - assert len(square_list[i]) == len(inside_flag_list[i]) - inside_flag_flat_list.append(torch.cat(inside_flag_list[i])) - approx_flat_list.append(torch.cat(approx_list[i])) - square_flat_list.append(torch.cat(square_list[i])) - - # compute targets for each image - if batch_gt_instances_ignore is None: - batch_gt_instances_ignore = [None for _ in range(num_imgs)] - (all_labels, all_label_weights, all_bbox_cls_targets, - all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights, - pos_inds_list, neg_inds_list, sampling_results_list) = multi_apply( - self._get_targets_single, - approx_flat_list, - inside_flag_flat_list, - square_flat_list, - batch_gt_instances, - batch_img_metas, - batch_gt_instances_ignore, - unmap_outputs=unmap_outputs) - - # sampled anchors of all images - avg_factor = sum( - [results.avg_factor for results in sampling_results_list]) - # split targets to a list w.r.t. multiple levels - labels_list = images_to_levels(all_labels, num_level_squares) - label_weights_list = images_to_levels(all_label_weights, - num_level_squares) - bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets, - num_level_squares) - bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights, - num_level_squares) - bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets, - num_level_squares) - bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights, - num_level_squares) - return (labels_list, label_weights_list, bbox_cls_targets_list, - bbox_cls_weights_list, bbox_reg_targets_list, - bbox_reg_weights_list, avg_factor) - - def _get_targets_single(self, - flat_approxs: Tensor, - inside_flags: Tensor, - flat_squares: Tensor, - gt_instances: InstanceData, - img_meta: dict, - gt_instances_ignore: Optional[InstanceData] = None, - unmap_outputs: bool = True) -> tuple: - """Compute regression and classification targets for anchors in a - single image. - - Args: - flat_approxs (Tensor): flat approxs of a single image, - shape (n, 4) - inside_flags (Tensor): inside flags of a single image, - shape (n, ). - flat_squares (Tensor): flat squares of a single image, - shape (approxs_per_octave * n, 4) - gt_instances (:obj:`InstanceData`): Ground truth of instance - annotations. It should includes ``bboxes`` and ``labels`` - attributes. - img_meta (dict): Meta information for current image. - gt_instances_ignore (:obj:`InstanceData`, optional): Instances - to be ignored during training. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - unmap_outputs (bool): Whether to map outputs back to the original - set of anchors. Defaults to True. - - Returns: - tuple: - - - labels_list (Tensor): Labels in a single image. - - label_weights (Tensor): Label weights in a single image. - - bbox_cls_targets (Tensor): BBox cls targets in a single image. - - bbox_cls_weights (Tensor): BBox cls weights in a single image. - - bbox_reg_targets (Tensor): BBox reg targets in a single image. - - bbox_reg_weights (Tensor): BBox reg weights in a single image. - - num_total_pos (int): Number of positive samples in a single \ - image. 
- - num_total_neg (int): Number of negative samples in a single \ - image. - - sampling_result (:obj:`SamplingResult`): Sampling result object. - """ - if not inside_flags.any(): - raise ValueError( - 'There is no valid anchor inside the image boundary. Please ' - 'check the image size and anchor sizes, or set ' - '``allowed_border`` to -1 to skip the condition.') - # assign gt and sample anchors - num_square = flat_squares.size(0) - approxs = flat_approxs.view(num_square, self.approxs_per_octave, 4) - approxs = approxs[inside_flags, ...] - squares = flat_squares[inside_flags, :] - - pred_instances = InstanceData() - pred_instances.priors = squares - pred_instances.approxs = approxs - assign_result = self.assigner.assign(pred_instances, gt_instances, - gt_instances_ignore) - sampling_result = self.sampler.sample(assign_result, pred_instances, - gt_instances) - - num_valid_squares = squares.shape[0] - bbox_cls_targets = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_cls_weights = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_reg_targets = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - bbox_reg_weights = squares.new_zeros( - (num_valid_squares, self.side_num * 4)) - labels = squares.new_full((num_valid_squares, ), - self.num_classes, - dtype=torch.long) - label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float) - - pos_inds = sampling_result.pos_inds - neg_inds = sampling_result.neg_inds - if len(pos_inds) > 0: - (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets, - pos_bbox_cls_weights) = self.bbox_coder.encode( - sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes) - - bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets - bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets - bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights - bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights - labels[pos_inds] = sampling_result.pos_gt_labels - if self.train_cfg['pos_weight'] <= 0: - label_weights[pos_inds] = 1.0 - else: - label_weights[pos_inds] = self.train_cfg['pos_weight'] - if len(neg_inds) > 0: - label_weights[neg_inds] = 1.0 - - # map up to original set of anchors - if unmap_outputs: - num_total_anchors = flat_squares.size(0) - labels = unmap( - labels, num_total_anchors, inside_flags, fill=self.num_classes) - label_weights = unmap(label_weights, num_total_anchors, - inside_flags) - bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors, - inside_flags) - bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors, - inside_flags) - bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors, - inside_flags) - bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors, - inside_flags) - return (labels, label_weights, bbox_cls_targets, bbox_cls_weights, - bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds, - sampling_result) - - def loss_by_feat_single(self, cls_score: Tensor, bbox_pred: Tensor, - labels: Tensor, label_weights: Tensor, - bbox_cls_targets: Tensor, bbox_cls_weights: Tensor, - bbox_reg_targets: Tensor, bbox_reg_weights: Tensor, - avg_factor: float) -> Tuple[Tensor]: - """Calculate the loss of a single scale level based on the features - extracted by the detection head. - - Args: - cls_score (Tensor): Box scores for each scale level - Has shape (N, num_anchors * num_classes, H, W). - bbox_pred (Tensor): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W). - labels (Tensor): Labels in a single image. 
- label_weights (Tensor): Label weights in a single level. - bbox_cls_targets (Tensor): BBox cls targets in a single level. - bbox_cls_weights (Tensor): BBox cls weights in a single level. - bbox_reg_targets (Tensor): BBox reg targets in a single level. - bbox_reg_weights (Tensor): BBox reg weights in a single level. - avg_factor (int): Average factor that is used to average the loss. - - Returns: - tuple: loss components. - """ - # classification loss - labels = labels.reshape(-1) - label_weights = label_weights.reshape(-1) - cls_score = cls_score.permute(0, 2, 3, - 1).reshape(-1, self.cls_out_channels) - loss_cls = self.loss_cls( - cls_score, labels, label_weights, avg_factor=avg_factor) - # regression loss - bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4) - bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4) - bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4) - bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4) - (bbox_cls_pred, bbox_reg_pred) = bbox_pred - bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape( - -1, self.side_num * 4) - bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape( - -1, self.side_num * 4) - loss_bbox_cls = self.loss_bbox_cls( - bbox_cls_pred, - bbox_cls_targets.long(), - bbox_cls_weights, - avg_factor=avg_factor * 4 * self.side_num) - loss_bbox_reg = self.loss_bbox_reg( - bbox_reg_pred, - bbox_reg_targets, - bbox_reg_weights, - avg_factor=avg_factor * 4 * self.bbox_coder.offset_topk) - return loss_cls, loss_bbox_cls, loss_bbox_reg - - def loss_by_feat( - self, - cls_scores: List[Tensor], - bbox_preds: List[Tensor], - batch_gt_instances: InstanceList, - batch_img_metas: List[dict], - batch_gt_instances_ignore: OptInstanceList = None) -> dict: - """Calculate the loss based on the features extracted by the detection - head. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - has shape (N, num_anchors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_anchors * 4, H, W). - batch_gt_instances (list[:obj:`InstanceData`]): Batch of - gt_instance. It usually includes ``bboxes`` and ``labels`` - attributes. - batch_img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - batch_gt_instances_ignore (list[:obj:`InstanceData`], optional): - Batch of gt_instances_ignore. It includes ``bboxes`` attribute - data that is ignored during training and testing. - Defaults to None. - - Returns: - dict: A dictionary of loss components. 
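            Concretely, the dict holds per-level lists under ``loss_cls``,
            ``loss_bbox_cls`` and ``loss_bbox_reg``, obtained by mapping
            ``loss_by_feat_single`` over the feature levels.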
- """ - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - assert len(featmap_sizes) == self.approx_anchor_generator.num_levels - - device = cls_scores[0].device - - # get sampled approxes - approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs( - self, featmap_sizes, batch_img_metas, device=device) - - square_list = self.get_anchors( - featmap_sizes, batch_img_metas, device=device) - - cls_reg_targets = self.get_targets( - approxs_list, - inside_flag_list, - square_list, - batch_gt_instances, - batch_img_metas, - batch_gt_instances_ignore=batch_gt_instances_ignore) - (labels_list, label_weights_list, bbox_cls_targets_list, - bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list, - avg_factor) = cls_reg_targets - - losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply( - self.loss_by_feat_single, - cls_scores, - bbox_preds, - labels_list, - label_weights_list, - bbox_cls_targets_list, - bbox_cls_weights_list, - bbox_reg_targets_list, - bbox_reg_weights_list, - avg_factor=avg_factor) - return dict( - loss_cls=losses_cls, - loss_bbox_cls=losses_bbox_cls, - loss_bbox_reg=losses_bbox_reg) - - def predict_by_feat(self, - cls_scores: List[Tensor], - bbox_preds: List[Tensor], - batch_img_metas: List[dict], - cfg: Optional[ConfigDict] = None, - rescale: bool = False, - with_nms: bool = True) -> InstanceList: - """Transform a batch of output features extracted from the head into - bbox results. - - Note: When score_factors is not None, the cls_scores are - usually multiplied by it then obtain the real score used in NMS, - such as CenterNess in FCOS, IoU branch in ATSS. - - Args: - cls_scores (list[Tensor]): Classification scores for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for all - scale levels, each is a 4D-tensor, has shape - (batch_size, num_priors * 4, H, W). - batch_img_metas (list[dict], Optional): Batch image meta info. - cfg (:obj:`ConfigDict`, optional): Test / postprocessing - configuration, if None, test_cfg would be used. - Defaults to None. - rescale (bool): If True, return boxes in original image space. - Defaults to False. - with_nms (bool): If True, do nms before return boxes. - Defaults to True. - - Returns: - list[:obj:`InstanceData`]: Object detection results of each image - after the post process. Each item usually contains following keys. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - - bboxes (Tensor): Has a shape (num_instances, 4), - the last dimension 4 arrange as (x1, y1, x2, y2). 
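        Note: for this head each element of ``bbox_preds`` is actually the
        ``(bbox_cls_pred, bbox_reg_pred)`` pair returned by ``forward_single``
        (each with ``side_num * 4`` channels), not a plain 4-channel delta map;
        the two parts are unpacked per image below.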
- """ - assert len(cls_scores) == len(bbox_preds) - num_levels = len(cls_scores) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - - device = cls_scores[0].device - mlvl_anchors = self.get_anchors( - featmap_sizes, batch_img_metas, device=device) - result_list = [] - for img_id in range(len(batch_img_metas)): - cls_score_list = [ - cls_scores[i][img_id].detach() for i in range(num_levels) - ] - bbox_cls_pred_list = [ - bbox_preds[i][0][img_id].detach() for i in range(num_levels) - ] - bbox_reg_pred_list = [ - bbox_preds[i][1][img_id].detach() for i in range(num_levels) - ] - proposals = self._predict_by_feat_single( - cls_scores=cls_score_list, - bbox_cls_preds=bbox_cls_pred_list, - bbox_reg_preds=bbox_reg_pred_list, - mlvl_anchors=mlvl_anchors[img_id], - img_meta=batch_img_metas[img_id], - cfg=cfg, - rescale=rescale, - with_nms=with_nms) - result_list.append(proposals) - return result_list - - def _predict_by_feat_single(self, - cls_scores: List[Tensor], - bbox_cls_preds: List[Tensor], - bbox_reg_preds: List[Tensor], - mlvl_anchors: List[Tensor], - img_meta: dict, - cfg: ConfigDict, - rescale: bool = False, - with_nms: bool = True) -> InstanceData: - cfg = self.test_cfg if cfg is None else cfg - nms_pre = cfg.get('nms_pre', -1) - - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_confids = [] - mlvl_labels = [] - assert len(cls_scores) == len(bbox_cls_preds) == len( - bbox_reg_preds) == len(mlvl_anchors) - for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip( - cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors): - assert cls_score.size()[-2:] == bbox_cls_pred.size( - )[-2:] == bbox_reg_pred.size()[-2::] - cls_score = cls_score.permute(1, 2, - 0).reshape(-1, self.cls_out_channels) - if self.use_sigmoid_cls: - scores = cls_score.sigmoid() - else: - scores = cls_score.softmax(-1)[:, :-1] - bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape( - -1, self.side_num * 4) - bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape( - -1, self.side_num * 4) - - # After https://github.com/open-mmlab/mmdetection/pull/6268/, - # this operation keeps fewer bboxes under the same `nms_pre`. - # There is no difference in performance for most models. If you - # find a slight drop in performance, you can set a larger - # `nms_pre` than before. 
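            # Per level: keep at most `nms_pre` candidates whose score exceeds
            # `cfg.score_thr`; the anchors and both bucket predictions are passed
            # through the results dict so they stay aligned with the kept scores.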
- results = filter_scores_and_topk( - scores, cfg.score_thr, nms_pre, - dict( - anchors=anchors, - bbox_cls_pred=bbox_cls_pred, - bbox_reg_pred=bbox_reg_pred)) - scores, labels, _, filtered_results = results - - anchors = filtered_results['anchors'] - bbox_cls_pred = filtered_results['bbox_cls_pred'] - bbox_reg_pred = filtered_results['bbox_reg_pred'] - - bbox_preds = [ - bbox_cls_pred.contiguous(), - bbox_reg_pred.contiguous() - ] - bboxes, confids = self.bbox_coder.decode( - anchors.contiguous(), - bbox_preds, - max_shape=img_meta['img_shape']) - - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_confids.append(confids) - mlvl_labels.append(labels) - - results = InstanceData() - results.bboxes = torch.cat(mlvl_bboxes) - results.scores = torch.cat(mlvl_scores) - results.score_factors = torch.cat(mlvl_confids) - results.labels = torch.cat(mlvl_labels) - - return self._bbox_post_process( - results=results, - cfg=cfg, - rescale=rescale, - with_nms=with_nms, - img_meta=img_meta) diff --git a/spaces/KyanChen/RSPrompter/mmpl/utils/misc.py b/spaces/KyanChen/RSPrompter/mmpl/utils/misc.py deleted file mode 100644 index 8633db7d95a1446586a469a873f7123a89b5f6f8..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/utils/misc.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os -import urllib - -import numpy as np -import torch -from mmengine.utils import scandir -from prettytable import PrettyTable - -# from mmyolo.models import RepVGGBlock - -IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', - '.tiff', '.webp') - - -def switch_to_deploy(model): - """Model switch to deploy status.""" - for layer in model.modules(): - if isinstance(layer, RepVGGBlock): - layer.switch_to_deploy() - - print('Switch model to deploy modality.') - - -def auto_arrange_images(image_list: list, image_column: int = 2) -> np.ndarray: - """Auto arrange image to image_column x N row. - - Args: - image_list (list): cv2 image list. - image_column (int): Arrange to N column. Default: 2. - Return: - (np.ndarray): image_column x N row merge image - """ - img_count = len(image_list) - if img_count <= image_column: - # no need to arrange - image_show = np.concatenate(image_list, axis=1) - else: - # arrange image according to image_column - image_row = round(img_count / image_column) - fill_img_list = [np.ones(image_list[0].shape, dtype=np.uint8) * 255 - ] * ( - image_row * image_column - img_count) - image_list.extend(fill_img_list) - merge_imgs_col = [] - for i in range(image_row): - start_col = image_column * i - end_col = image_column * (i + 1) - merge_col = np.hstack(image_list[start_col:end_col]) - merge_imgs_col.append(merge_col) - - # merge to one image - image_show = np.vstack(merge_imgs_col) - - return image_show - - -def get_file_list(source_root: str) -> [list, dict]: - """Get file list. - - Args: - source_root (str): image or video source path - - Return: - source_file_path_list (list): A list for all source file. - source_type (dict): Source type: file or url or dir. 
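        The checks run in the order dir -> url -> file, so an image URL is
        handled by the url branch (downloaded to the working directory) rather
        than treated as a local file; if none of the flags matches, an empty
        list is returned after printing a warning.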
- """ - is_dir = os.path.isdir(source_root) - is_url = source_root.startswith(('http:/', 'https:/')) - is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS - - source_file_path_list = [] - if is_dir: - # when input source is dir - for file in scandir(source_root, IMG_EXTENSIONS, recursive=True): - source_file_path_list.append(os.path.join(source_root, file)) - elif is_url: - # when input source is url - filename = os.path.basename( - urllib.parse.unquote(source_root).split('?')[0]) - file_save_path = os.path.join(os.getcwd(), filename) - print(f'Downloading source file to {file_save_path}') - torch.hub.download_url_to_file(source_root, file_save_path) - source_file_path_list = [file_save_path] - elif is_file: - # when input source is single image - source_file_path_list = [source_root] - else: - print('Cannot find image file.') - - source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file) - - return source_file_path_list, source_type - - -def show_data_classes(data_classes): - """When printing an error, all class names of the dataset.""" - print('\n\nThe name of the class contained in the dataset:') - data_classes_info = PrettyTable() - data_classes_info.title = 'Information of dataset class' - # List Print Settings - # If the quantity is too large, 25 rows will be displayed in each column - if len(data_classes) < 25: - data_classes_info.add_column('Class name', data_classes) - elif len(data_classes) % 25 != 0 and len(data_classes) > 25: - col_num = int(len(data_classes) / 25) + 1 - data_name_list = list(data_classes) - for i in range(0, (col_num * 25) - len(data_classes)): - data_name_list.append('') - for i in range(0, len(data_name_list), 25): - data_classes_info.add_column('Class name', - data_name_list[i:i + 25]) - - # Align display data to the left - data_classes_info.align['Class name'] = 'l' - print(data_classes_info) - - -def is_metainfo_lower(cfg): - """Determine whether the custom metainfo fields are all lowercase.""" - - def judge_keys(dataloader_cfg): - while 'dataset' in dataloader_cfg: - dataloader_cfg = dataloader_cfg['dataset'] - if 'metainfo' in dataloader_cfg: - all_keys = dataloader_cfg['metainfo'].keys() - all_is_lower = all([str(k).islower() for k in all_keys]) - assert all_is_lower, f'The keys in dataset metainfo must be all lowercase, but got {all_keys}. 
' \ - f'Please refer to https://github.com/open-mmlab/mmyolo/blob/e62c8c4593/configs/yolov5/yolov5_s-v61_syncbn_fast_1xb4-300e_balloon.py#L8' # noqa - - judge_keys(cfg.get('train_dataloader', {})) - judge_keys(cfg.get('val_dataloader', {})) - judge_keys(cfg.get('test_dataloader', {})) diff --git a/spaces/Laihiujin/OneFormer/oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py b/spaces/Laihiujin/OneFormer/oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py deleted file mode 100644 index 677b9eaf25e25e98c6e7d39a6c77a29f2f313d3c..0000000000000000000000000000000000000000 --- a/spaces/Laihiujin/OneFormer/oneformer/data/dataset_mappers/coco_unified_new_baseline_dataset_mapper.py +++ /dev/null @@ -1,341 +0,0 @@ -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/Mask2Former/blob/main/mask2former/data/dataset_mappers/coco_panoptic_new_baseline_dataset_mapper.py -# Modified by Jitesh Jain (https://github.com/praeclarumjj3) -# ------------------------------------------------------------------------------ - -import copy -import logging - -import numpy as np -import torch - -from detectron2.data import MetadataCatalog -from detectron2.config import configurable -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T -from detectron2.structures import BitMasks, Instances -from oneformer.utils.box_ops import masks_to_boxes -from oneformer.data.tokenizer import SimpleTokenizer, Tokenize - -__all__ = ["COCOUnifiedNewBaselineDatasetMapper"] - - -def build_transform_gen(cfg, is_train): - """ - Create a list of default :class:`Augmentation` from config. - Now it includes resizing and flipping. - Returns: - list[Augmentation] - """ - assert is_train, "Only support training augmentation" - image_size = cfg.INPUT.IMAGE_SIZE - min_scale = cfg.INPUT.MIN_SCALE - max_scale = cfg.INPUT.MAX_SCALE - - augmentation = [] - - if cfg.INPUT.RANDOM_FLIP != "none": - augmentation.append( - T.RandomFlip( - horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal", - vertical=cfg.INPUT.RANDOM_FLIP == "vertical", - ) - ) - - augmentation.extend([ - T.ResizeScale( - min_scale=min_scale, max_scale=max_scale, target_height=image_size, target_width=image_size - ), - T.FixedSizeCrop(crop_size=(image_size, image_size)), - ]) - - return augmentation - - -# This is specifically designed for the COCO dataset. -class COCOUnifiedNewBaselineDatasetMapper: - """ - A callable which takes a dataset dict in Detectron2 Dataset format, - and map it into a format used by OneFormer. - - This dataset mapper applies the same transformation as DETR for COCO panoptic segmentation. - - The callable currently does the following: - - 1. Read the image from "file_name" - 2. Applies geometric transforms to the image and annotation - 3. Find and applies suitable cropping to the image and annotation - 4. Prepare image and annotation to Tensors - """ - - @configurable - def __init__( - self, - is_train=True, - *, - num_queries, - tfm_gens, - meta, - image_format, - max_seq_len, - task_seq_len, - semantic_prob, - instance_prob, - ): - """ - NOTE: this interface is experimental. - Args: - is_train: for training or inference - augmentations: a list of augmentations or deterministic transforms to apply - crop_gen: crop augmentation - tfm_gens: data augmentation - image_format: an image format supported by :func:`detection_utils.read_image`. 
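            num_queries: number of text queries (``NUM_OBJECT_QUERIES`` minus the
                text-encoder context length), used as the length of the per-image
                text prompt list
            meta: dataset metadata (class names, ignore label, thing ids)
            max_seq_len / task_seq_len: token lengths for the text and task tokenizers
            semantic_prob / instance_prob: thresholds used in ``__call__`` to sample
                a semantic, instance, or panoptic task for each training image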
- """ - self.tfm_gens = tfm_gens - logging.getLogger(__name__).info( - "[COCOUnifiedNewBaselineDatasetMapper] Full TransformGens used in training: {}".format( - str(self.tfm_gens) - ) - ) - - self.img_format = image_format - self.is_train = is_train - self.meta = meta - self.ignore_label = self.meta.ignore_label - self.num_queries = num_queries - - self.things = [] - for k,v in self.meta.thing_dataset_id_to_contiguous_id.items(): - self.things.append(v) - self.class_names = self.meta.stuff_classes - self.text_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=max_seq_len) - self.task_tokenizer = Tokenize(SimpleTokenizer(), max_seq_len=task_seq_len) - self.semantic_prob = semantic_prob - self.instance_prob = instance_prob - - @classmethod - def from_config(cls, cfg, is_train=True): - # Build augmentation - tfm_gens = build_transform_gen(cfg, is_train) - dataset_names = cfg.DATASETS.TRAIN - meta = MetadataCatalog.get(dataset_names[0]) - - ret = { - "is_train": is_train, - "meta": meta, - "tfm_gens": tfm_gens, - "image_format": cfg.INPUT.FORMAT, - "num_queries": cfg.MODEL.ONE_FORMER.NUM_OBJECT_QUERIES - cfg.MODEL.TEXT_ENCODER.N_CTX, - "task_seq_len": cfg.INPUT.TASK_SEQ_LEN, - "max_seq_len": cfg.INPUT.MAX_SEQ_LEN, - "semantic_prob": cfg.INPUT.TASK_PROB.SEMANTIC, - "instance_prob": cfg.INPUT.TASK_PROB.INSTANCE, - } - return ret - - def _get_semantic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): - instances = Instances(image_shape) - - classes = [] - texts = ["a semantic photo"] * self.num_queries - masks = [] - label = np.ones_like(pan_seg_gt) * self.ignore_label - - for segment_info in segments_info: - class_id = segment_info["category_id"] - if not segment_info["iscrowd"]: - mask = pan_seg_gt == segment_info["id"] - if not np.all(mask == False): - if class_id not in classes: - cls_name = self.class_names[class_id] - classes.append(class_id) - masks.append(mask) - num_class_obj[cls_name] += 1 - else: - idx = classes.index(class_id) - masks[idx] += mask - masks[idx] = np.clip(masks[idx], 0, 1).astype(np.bool) - label[mask] = class_id - - num = 0 - for i, cls_name in enumerate(self.class_names): - if num_class_obj[cls_name] > 0: - for _ in range(num_class_obj[cls_name]): - if num >= len(texts): - break - texts[num] = f"a photo with a {cls_name}" - num += 1 - - classes = np.array(classes) - instances.gt_classes = torch.tensor(classes, dtype=torch.int64) - if len(masks) == 0: - # Some image does not have annotation (all ignored) - instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) - instances.gt_bboxes = torch.zeros((0, 4)) - else: - masks = BitMasks( - torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) - ) - instances.gt_masks = masks.tensor - # Placeholder bounding boxes for stuff regions. Note that these are not used during training. 
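            # For the semantic task every mask (stuff or thing) gets the same dummy
            # unit box [0, 0, 1, 1]; real boxes via masks_to_boxes are only computed
            # in the instance/panoptic branches below.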
- instances.gt_bboxes = torch.stack([torch.tensor([0., 0., 1., 1.])] * instances.gt_masks.shape[0]) - return instances, texts, label - - def _get_instance_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): - instances = Instances(image_shape) - - classes = [] - texts = ["an instance photo"] * self.num_queries - masks = [] - label = np.ones_like(pan_seg_gt) * self.ignore_label - - for segment_info in segments_info: - class_id = segment_info["category_id"] - if class_id in self.things: - if not segment_info["iscrowd"]: - mask = pan_seg_gt == segment_info["id"] - if not np.all(mask == False): - cls_name = self.class_names[class_id] - classes.append(class_id) - masks.append(mask) - num_class_obj[cls_name] += 1 - label[mask] = class_id - - num = 0 - for i, cls_name in enumerate(self.class_names): - if num_class_obj[cls_name] > 0: - for _ in range(num_class_obj[cls_name]): - if num >= len(texts): - break - texts[num] = f"a photo with a {cls_name}" - num += 1 - - classes = np.array(classes) - instances.gt_classes = torch.tensor(classes, dtype=torch.int64) - if len(masks) == 0: - # Some image does not have annotation (all ignored) - instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) - instances.gt_bboxes = torch.zeros((0, 4)) - else: - masks = BitMasks( - torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) - ) - instances.gt_masks = masks.tensor - instances.gt_bboxes = masks_to_boxes(instances.gt_masks) - return instances, texts, label - - def _get_panoptic_dict(self, pan_seg_gt, image_shape, segments_info, num_class_obj): - instances = Instances(image_shape) - - classes = [] - texts = ["a panoptic photo"] * self.num_queries - masks = [] - label = np.ones_like(pan_seg_gt) * self.ignore_label - - for segment_info in segments_info: - class_id = segment_info["category_id"] - if not segment_info["iscrowd"]: - mask = pan_seg_gt == segment_info["id"] - if not np.all(mask == False): - cls_name = self.class_names[class_id] - classes.append(class_id) - masks.append(mask) - num_class_obj[cls_name] += 1 - label[mask] = class_id - - num = 0 - for i, cls_name in enumerate(self.class_names): - if num_class_obj[cls_name] > 0: - for _ in range(num_class_obj[cls_name]): - if num >= len(texts): - break - texts[num] = f"a photo with a {cls_name}" - num += 1 - - classes = np.array(classes) - instances.gt_classes = torch.tensor(classes, dtype=torch.int64) - if len(masks) == 0: - # Some image does not have annotation (all ignored) - instances.gt_masks = torch.zeros((0, pan_seg_gt.shape[-2], pan_seg_gt.shape[-1])) - instances.gt_bboxes = torch.zeros((0, 4)) - else: - masks = BitMasks( - torch.stack([torch.from_numpy(np.ascontiguousarray(x.copy())) for x in masks]) - ) - instances.gt_masks = masks.tensor - instances.gt_bboxes = masks_to_boxes(instances.gt_masks) - for i in range(instances.gt_classes.shape[0]): - # Placeholder bounding boxes for stuff regions. Note that these are not used during training. - if instances.gt_classes[i].item() not in self.things: - instances.gt_bboxes[i] = torch.tensor([0., 0., 1., 1.]) - return instances, texts, label - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. 
- - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - image_shape = image.shape[:2] # h, w - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. - dataset_dict.pop("annotations", None) - return dataset_dict - - # semantic segmentation - if "sem_seg_file_name" in dataset_dict: - # PyTorch transformation not implemented for uint16, so converting it to double first - sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name")).astype("double") - sem_seg_gt = transforms.apply_segmentation(sem_seg_gt) - else: - sem_seg_gt = None - - if "pan_seg_file_name" in dataset_dict: - pan_seg_gt = utils.read_image(dataset_dict.pop("pan_seg_file_name"), "RGB") - segments_info = dataset_dict["segments_info"] - - # apply the same transformation to panoptic segmentation - pan_seg_gt = transforms.apply_segmentation(pan_seg_gt) - - from panopticapi.utils import rgb2id - pan_seg_gt = rgb2id(pan_seg_gt) - - prob_task = np.random.uniform(0,1.) - - num_class_obj = {} - - for name in self.class_names: - num_class_obj[name] = 0 - - if prob_task < self.semantic_prob: - task = "The task is semantic" - instances, text, sem_seg = self._get_semantic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) - elif prob_task < self.instance_prob: - task = "The task is instance" - instances, text, sem_seg = self._get_instance_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) - else: - task = "The task is panoptic" - instances, text, sem_seg = self._get_panoptic_dict(pan_seg_gt, image_shape, segments_info, num_class_obj) - - - dataset_dict["sem_seg"] = torch.from_numpy(sem_seg).long() - dataset_dict["instances"] = instances - dataset_dict["orig_shape"] = image_shape - dataset_dict["task"] = task - dataset_dict["text"] = text - dataset_dict["thing_ids"] = self.things - - return dataset_dict diff --git a/spaces/LaynzKunz/Advanced-RVC-Inference/config.py b/spaces/LaynzKunz/Advanced-RVC-Inference/config.py deleted file mode 100644 index 20d1ff5e1c00aeccd31191294502d40b2738d249..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/Advanced-RVC-Inference/config.py +++ /dev/null @@ -1,96 +0,0 @@ -import argparse -import sys -import torch -from multiprocessing import cpu_count - -class Config: - def __init__(self): - self.device = "cuda:0" - self.is_half = True - self.n_cpu = 0 - self.gpu_name = None - self.gpu_mem = None - ( - self.colab, - self.api, - ) = self.arg_parse() - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - @staticmethod - def arg_parse() -> tuple: - parser = argparse.ArgumentParser() - parser.add_argument("--colab", action="store_true", help="Launch in colab") - parser.add_argument("--api", action="store_true", help="Launch with api") - cmd_opts = parser.parse_args() - - return ( - cmd_opts.colab, - cmd_opts.api - ) - - # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. 
- # check `getattr` and try it for compatibility - @staticmethod - def has_mps() -> bool: - if not torch.backends.mps.is_available(): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - or "1080" in self.gpu_name - ): - print("INFO: Found GPU", self.gpu_name, ", force to fp32") - self.is_half = False - else: - print("INFO: Found GPU", self.gpu_name) - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - elif self.has_mps(): - print("INFO: No supported Nvidia GPU found, use MPS instead") - self.device = "mps" - self.is_half = False - else: - print("INFO: No supported Nvidia GPU found, use CPU instead") - self.device = "cpu" - self.is_half = False - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max diff --git a/spaces/MKFMIKU/Bi-Noising.Diffusion/README.md b/spaces/MKFMIKU/Bi-Noising.Diffusion/README.md deleted file mode 100644 index c4810e4adbe2d346a68937d4675a2ea0da3579a1..0000000000000000000000000000000000000000 --- a/spaces/MKFMIKU/Bi-Noising.Diffusion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Bi Noising.Diffusion -emoji: 💊 -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/util/__init__.py b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/util/__init__.py deleted file mode 100644 index 168f9979a4623806934b0ff1102ac166704e7dec..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/util/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/spaces/Marshalls/testmtd/analysis/aistplusplus_api/smpl/smpl_webuser/lbs.py b/spaces/Marshalls/testmtd/analysis/aistplusplus_api/smpl/smpl_webuser/lbs.py deleted file mode 100644 index 4bf5165e82e3df9f1b71011f5e7e657681da9565..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/analysis/aistplusplus_api/smpl/smpl_webuser/lbs.py +++ /dev/null @@ -1,80 +0,0 @@ -''' -Copyright 2015 Matthew Loper, Naureen Mahmood and the Max Planck Gesellschaft. All rights reserved. -This software is provided for research purposes only. -By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license - -More information about SMPL is available here http://smpl.is.tue.mpg. 
-For comments or questions, please email us at: smpl@tuebingen.mpg.de - - -About this file: -================ -This file defines linear blend skinning for the SMPL loader which -defines the effect of bones and blendshapes on the vertices of the template mesh. - -Modules included: -- global_rigid_transformation: - computes global rotation & translation of the model -- verts_core: [overloaded function inherited from verts.verts_core] - computes the blending of joint-influences for each vertex based on type of skinning - -''' - -from posemapper import posemap -import chumpy -import numpy as np - -def global_rigid_transformation(pose, J, kintree_table, xp): - results = {} - pose = pose.reshape((-1,3)) - id_to_col = {kintree_table[1,i] : i for i in range(kintree_table.shape[1])} - parent = {i : id_to_col[kintree_table[0,i]] for i in range(1, kintree_table.shape[1])} - - if xp == chumpy: - from posemapper import Rodrigues - rodrigues = lambda x : Rodrigues(x) - else: - import cv2 - rodrigues = lambda x : cv2.Rodrigues(x)[0] - - with_zeros = lambda x : xp.vstack((x, xp.array([[0.0, 0.0, 0.0, 1.0]]))) - results[0] = with_zeros(xp.hstack((rodrigues(pose[0,:]), J[0,:].reshape((3,1))))) - - for i in range(1, kintree_table.shape[1]): - results[i] = results[parent[i]].dot(with_zeros(xp.hstack(( - rodrigues(pose[i,:]), - ((J[i,:] - J[parent[i],:]).reshape((3,1))) - )))) - - pack = lambda x : xp.hstack([np.zeros((4, 3)), x.reshape((4,1))]) - - results = [results[i] for i in sorted(results.keys())] - results_global = results - - if True: - results2 = [results[i] - (pack( - results[i].dot(xp.concatenate( ( (J[i,:]), 0 ) ))) - ) for i in range(len(results))] - results = results2 - result = xp.dstack(results) - return result, results_global - - -def verts_core(pose, v, J, weights, kintree_table, want_Jtr=False, xp=chumpy): - A, A_global = global_rigid_transformation(pose, J, kintree_table, xp) - T = A.dot(weights.T) - - rest_shape_h = xp.vstack((v.T, np.ones((1, v.shape[0])))) - - v =(T[:,0,:] * rest_shape_h[0, :].reshape((1, -1)) + - T[:,1,:] * rest_shape_h[1, :].reshape((1, -1)) + - T[:,2,:] * rest_shape_h[2, :].reshape((1, -1)) + - T[:,3,:] * rest_shape_h[3, :].reshape((1, -1))).T - - v = v[:,:3] - - if not want_Jtr: - return v - Jtr = xp.vstack([g[:3,3] for g in A_global]) - return (v, Jtr) - diff --git a/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/crf.py b/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/crf.py deleted file mode 100644 index 04f67d19cb26c3d0c87c0e488a7878f84f034ddb..0000000000000000000000000000000000000000 --- a/spaces/Marshalls/testmtd/feature_extraction/madmom/ml/crf.py +++ /dev/null @@ -1,119 +0,0 @@ -# encoding: utf-8 -""" -This module contains an implementation of Conditional Random Fields (CRFs) -""" -# pylint: disable=no-member -# pylint: disable=invalid-name -import numpy as np - -from ..processors import Processor - - -class ConditionalRandomField(Processor): - """ - Implements a linear-chain Conditional Random Field using a - matrix-based definition: - - .. math:: - P(Y|X) = exp[E(Y,X)] / Σ_{Y'}[E(Y', X)] - - E(Y,X) = Σ_{i=1}^{N} [y_{n-1}^T A y_n + y_n^T c + x_n^T W y_n ] + - y_0^T π + y_N^T τ, - - where Y is a sequence of labels in one-hot encoding and X are the observed - features. - - Parameters - ---------- - initial : numpy array - Initial potential (π) of the CRF. Also defines the number of states. - final : numpy array - Potential (τ) of the last variable of the CRF. - bias : numpy array - Label bias potential (c). 
- transition : numpy array - Matrix defining the transition potentials (A), where the rows are the - 'from' dimension, and columns the 'to' dimension. - observation : numpy array - Matrix defining the observation potentials (W), where the rows are the - 'observation' dimension, and columns the 'state' dimension. - - Examples - -------- - Create a CRF that emulates a simple hidden markov model. This means that - the bias and final potential will be constant and thus have no effect - on the predictions. - - >>> eta = np.spacing(1) # for numerical stability - >>> initial = np.log(np.array([0.7, 0.2, 0.1]) + eta) - >>> final = np.ones(3) - >>> bias = np.ones(3) - >>> transition = np.log(np.array([[0.6, 0.2, 0.2], - ... [0.1, 0.7, 0.2], - ... [0.1, 0.1, 0.8]]) + eta) - >>> observation = np.log(np.array([[0.9, 0.5, 0.1], - ... [0.1, 0.5, 0.1]]) + eta) - >>> crf = ConditionalRandomField(initial, final, bias, - ... transition, observation) - >>> crf # doctest: +ELLIPSIS -{1} ({2})
'.format(str(48*float(lyrics_score)), lyrics, round(lyrics_score,3)) - st.markdown(lyrics_result, unsafe_allow_html=True) - except: - - pass - - # Chart Display Cluster and Query Results - st.subheader('Embeddings Clusters:') - - # encode charts - embeddings_cluster_chart = alt.Chart(sampling_df).mark_circle().encode( - x=alt.X('component1:Q'), - y=alt.Y('component2:Q'), - color=alt.Color('cluster:O', scale=alt.Scale(scheme='tableau10')), - tooltip=['lyrics', 'cluster'] - ) - # encode query - query_chart = alt.Chart(query_df).mark_square(size=80, color='red').encode( - x=alt.X('component1:Q'), - y=alt.Y('component2:Q'), - tooltip='query' - ) - # display 2 charts layer on each other - chart = (embeddings_cluster_chart+query_chart).interactive() - st.altair_chart(chart, use_container_width=True) - -## Image upload -image_input = st.file_uploader("Or upload an image (experimental)", type=['.png','jpg'], accept_multiple_files=False, key="img") -## valence_range -valence_range_img = st.slider('Choose your happiness level',0, 10, (0, 10), key="img_mood") -valence_min_img = valence_range_img[0]/10 -valence_max_img = valence_range_img[1]/10 -## embeddings generation + semantic searching -#Visuals - image search results -if image_input is not None: - query_img = Image.open(image_input) - st.subheader('Image Uploaded:') - st.image(query_img, width=200) - st.markdown('It might take a few moments to generate results') - model_img = load_model("clip-ViT-B-32") - with st.spinner('Generating results...'): - df_img, df_results_img = load_results_img(query_img) - - ## Reminder - - # Songs recommendation list - st.header('Results') - - with st.container(): - - col1, col2 = st.columns([2, 1]) - - with col1: - st.subheader('Songs:') - grid_table_img = load_grid_table(df_results_img) - with col2: - st.subheader('Lyrics:') - st.write("Credits: Genius Community") - selected_row_img = grid_table_img["selected_rows"] - if selected_row_img is not None: - try: - #st.write(selected_row) - selected_row_index_img = selected_row_img[0]['_selectedRowNodeInfo']['nodeRowIndex'] - #st.write(selected_row_index) - tuple_lyrics_results_img = df_img['lyrics_scores'].iloc[int(selected_row_index_img)] - #st.write(dic_lyrics_results) - for result in tuple_lyrics_results_img: - lyrics = result[0] - lyrics_score = result[1] - original_title_img = '{1} ({2})
'.format(str(48*float(lyrics_score)), lyrics, round(lyrics_score,3)) - st.markdown(original_title_img, unsafe_allow_html=True) - except: - #st.write('cannot fetch lyrics') - pass - else: - st.write('') - #st.write('no results selected') \ No newline at end of file diff --git a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/ChatgptLogin.py b/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/ChatgptLogin.py deleted file mode 100644 index 9551d15dd5121c4b42f80d0ba547a10f0868563b..0000000000000000000000000000000000000000 --- a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/ChatgptLogin.py +++ /dev/null @@ -1,96 +0,0 @@ -import os -from ...typing import sha256, Dict, get_type_hints -import requests -import re -import base64 - -url = 'https://chatgptlogin.ac' -model = ['gpt-3.5-turbo'] -supports_stream = False -needs_auth = False - - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - def get_nonce(): - res = requests.get('https://chatgptlogin.ac/use-chatgpt-free/', headers={ - "Referer": "https://chatgptlogin.ac/use-chatgpt-free/", - "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' - }) - - src = re.search(r'class="mwai-chat mwai-chatgpt">.*Send - - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/merve/uncertainty-calibration/public/dataset-worldviews/interface-images.js b/spaces/merve/uncertainty-calibration/public/dataset-worldviews/interface-images.js deleted file mode 100644 index 5e7040a3a979423e2c88cdbf8c4e5e840a5b35d0..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/dataset-worldviews/interface-images.js +++ /dev/null @@ -1,8 +0,0 @@ -function createInterfaceImage(divName){ - - var c = d3.conventions({ - sel: d3.select('.' + divName).html('') - }) - - -} \ No newline at end of file diff --git a/spaces/merve/voice-cloning/app.py b/spaces/merve/voice-cloning/app.py deleted file mode 100644 index f12b0afc3c960bee4433c4409a87fca68abca8c1..0000000000000000000000000000000000000000 --- a/spaces/merve/voice-cloning/app.py +++ /dev/null @@ -1,303 +0,0 @@ -import json -import os -import subprocess -from pathlib import Path - -import gradio as gr -import librosa -import numpy as np -import torch -from demucs.apply import apply_model -from demucs.pretrained import DEFAULT_MODEL, get_model -from huggingface_hub import hf_hub_download, list_repo_files - -from so_vits_svc_fork.hparams import HParams -from so_vits_svc_fork.inference.core import Svc - - -################################################################### -# REPLACE THESE VALUES TO CHANGE THE MODEL REPO/CKPT NAME/SETTINGS -################################################################### -# The Hugging Face Hub repo ID -repo_id = "merve/svc-test" - -# If None, Uses latest ckpt in the repo -ckpt_name = None - -# If None, Uses "kmeans.pt" if it exists in the repo -cluster_model_name = None - -# Set the default f0 type to use - use the one it was trained on. -# The default for so-vits-svc-fork is "dio". -# Options: "crepe", "crepe-tiny", "parselmouth", "dio", "harvest" -default_f0_method = "crepe" - -# The default ratio of cluster inference to SVC inference. -# If cluster_model_name is not found in the repo, this is set to 0. -default_cluster_infer_ratio = 0.5 - -# Limit on duration of audio at inference time. 
increase if you can -# In this parent app, we set the limit with an env var to 30 seconds -# If you didnt set env var + you go OOM try changing 9e9 to <=300ish -duration_limit = int(os.environ.get("MAX_DURATION_SECONDS", 9e9)) -################################################################### - -# Figure out the latest generator by taking highest value one. -# Ex. if the repo has: G_0.pth, G_100.pth, G_200.pth, we'd use G_200.pth -if ckpt_name is None: - latest_id = sorted( - [ - int(Path(x).stem.split("_")[1]) - for x in list_repo_files(repo_id) - if x.startswith("G_") and x.endswith(".pth") - ] - )[-1] - ckpt_name = f"G_{latest_id}.pth" - -cluster_model_name = cluster_model_name or "kmeans.pt" -if cluster_model_name in list_repo_files(repo_id): - print(f"Found Cluster model - Downloading {cluster_model_name} from {repo_id}") - cluster_model_path = hf_hub_download(repo_id, cluster_model_name) -else: - print(f"Could not find {cluster_model_name} in {repo_id}. Using None") - cluster_model_path = None -default_cluster_infer_ratio = default_cluster_infer_ratio if cluster_model_path else 0 - -generator_path = hf_hub_download(repo_id, ckpt_name) -config_path = hf_hub_download(repo_id, "config.json") -hparams = HParams(**json.loads(Path(config_path).read_text())) -speakers = list(hparams.spk.keys()) -device = "cuda" if torch.cuda.is_available() else "cpu" -model = Svc(net_g_path=generator_path, config_path=config_path, device=device, cluster_model_path=cluster_model_path) -demucs_model = get_model(DEFAULT_MODEL) - - -def extract_vocal_demucs(model, filename, sr=44100, device=None, shifts=1, split=True, overlap=0.25, jobs=0): - wav, sr = librosa.load(filename, mono=False, sr=sr) - wav = torch.tensor(wav) - ref = wav.mean(0) - wav = (wav - ref.mean()) / ref.std() - sources = apply_model( - model, wav[None], device=device, shifts=shifts, split=split, overlap=overlap, progress=True, num_workers=jobs - )[0] - sources = sources * ref.std() + ref.mean() - # We take just the vocals stem. 
I know the vocals for this model are at index -1 - # If using different model, check model.sources.index('vocals') - vocal_wav = sources[-1] - # I did this because its the same normalization the so-vits model required - vocal_wav = vocal_wav / max(1.01 * vocal_wav.abs().max(), 1) - vocal_wav = vocal_wav.numpy() - vocal_wav = librosa.to_mono(vocal_wav) - vocal_wav = vocal_wav.T - instrumental_wav = sources[:-1].sum(0).numpy().T - return vocal_wav, instrumental_wav - - -def download_youtube_clip( - video_identifier, - start_time, - end_time, - output_filename, - num_attempts=5, - url_base="https://www.youtube.com/watch?v=", - quiet=False, - force=False, -): - output_path = Path(output_filename) - if output_path.exists(): - if not force: - return output_path - else: - output_path.unlink() - - quiet = "--quiet --no-warnings" if quiet else "" - command = f""" - yt-dlp {quiet} -x --audio-format wav -f bestaudio -o "{output_filename}" --download-sections "*{start_time}-{end_time}" "{url_base}{video_identifier}" # noqa: E501 - """.strip() - - attempts = 0 - while True: - try: - _ = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - attempts += 1 - if attempts == num_attempts: - return None - else: - break - - if output_path.exists(): - return output_path - else: - return None - - -def predict( - speaker, - audio, - transpose: int = 0, - auto_predict_f0: bool = False, - cluster_infer_ratio: float = 0, - noise_scale: float = 0.4, - f0_method: str = "crepe", - db_thresh: int = -40, - pad_seconds: float = 0.5, - chunk_seconds: float = 0.5, - absolute_thresh: bool = False, -): - audio, _ = librosa.load(audio, sr=model.target_sample, duration=duration_limit) - audio = model.infer_silence( - audio.astype(np.float32), - speaker=speaker, - transpose=transpose, - auto_predict_f0=auto_predict_f0, - cluster_infer_ratio=cluster_infer_ratio, - noise_scale=noise_scale, - f0_method=f0_method, - db_thresh=db_thresh, - pad_seconds=pad_seconds, - chunk_seconds=chunk_seconds, - absolute_thresh=absolute_thresh, - ) - return model.target_sample, audio - - -def predict_song_from_yt( - ytid_or_url, - start, - end, - speaker=speakers[0], - transpose: int = 0, - auto_predict_f0: bool = False, - cluster_infer_ratio: float = 0, - noise_scale: float = 0.4, - f0_method: str = "dio", - db_thresh: int = -40, - pad_seconds: float = 0.5, - chunk_seconds: float = 0.5, - absolute_thresh: bool = False, -): - end = min(start + duration_limit, end) - original_track_filepath = download_youtube_clip( - ytid_or_url, - start, - end, - "track.wav", - force=True, - url_base="" if ytid_or_url.startswith("http") else "https://www.youtube.com/watch?v=", - ) - vox_wav, inst_wav = extract_vocal_demucs(demucs_model, original_track_filepath) - if transpose != 0: - inst_wav = librosa.effects.pitch_shift(inst_wav.T, sr=model.target_sample, n_steps=transpose).T - cloned_vox = model.infer_silence( - vox_wav.astype(np.float32), - speaker=speaker, - transpose=transpose, - auto_predict_f0=auto_predict_f0, - cluster_infer_ratio=cluster_infer_ratio, - noise_scale=noise_scale, - f0_method=f0_method, - db_thresh=db_thresh, - pad_seconds=pad_seconds, - chunk_seconds=chunk_seconds, - absolute_thresh=absolute_thresh, - ) - full_song = inst_wav + np.expand_dims(cloned_vox, 1) - return (model.target_sample, full_song), (model.target_sample, cloned_vox) - - -SPACE_ID = "nateraw/voice-cloning" -description = f""" -# Attention - This Space may be slow in the shared UI if there is a long queue. 
To speed it up, you can duplicate and use it with a paid private T4 GPU. - -- Github Repo -
-""".strip() - - -interface_mic = gr.Interface( - predict, - inputs=[ - gr.Dropdown(speakers, value=speakers[0], label="Target Speaker"), - gr.Audio(type="filepath", source="microphone", label="Source Audio"), - gr.Slider(-12, 12, value=0, step=1, label="Transpose (Semitones)"), - gr.Checkbox(False, label="Auto Predict F0"), - gr.Slider(0.0, 1.0, value=default_cluster_infer_ratio, step=0.1, label="cluster infer ratio"), - gr.Slider(0.0, 1.0, value=0.4, step=0.1, label="noise scale"), - gr.Dropdown( - choices=["crepe", "crepe-tiny", "parselmouth", "dio", "harvest"], - value=default_f0_method, - label="f0 method", - ), - ], - outputs="audio", - title="Voice Cloning", - description=description, - article=article, -) -interface_file = gr.Interface( - predict, - inputs=[ - gr.Dropdown(speakers, value=speakers[0], label="Target Speaker"), - gr.Audio(type="filepath", source="upload", label="Source Audio"), - gr.Slider(-12, 12, value=0, step=1, label="Transpose (Semitones)"), - gr.Checkbox(False, label="Auto Predict F0"), - gr.Slider(0.0, 1.0, value=default_cluster_infer_ratio, step=0.1, label="cluster infer ratio"), - gr.Slider(0.0, 1.0, value=0.4, step=0.1, label="noise scale"), - gr.Dropdown( - choices=["crepe", "crepe-tiny", "parselmouth", "dio", "harvest"], - value=default_f0_method, - label="f0 method", - ), - ], - outputs="audio", - title="Voice Cloning", - description=description, - article=article, -) -interface_yt = gr.Interface( - predict_song_from_yt, - inputs=[ - gr.Textbox( - label="YouTube URL or ID", info="A YouTube URL (or ID) to a song on YouTube you want to clone from" - ), - gr.Number(value=0, label="Start Time (seconds)"), - gr.Number(value=15, label="End Time (seconds)"), - gr.Dropdown(speakers, value=speakers[0], label="Target Speaker"), - gr.Slider(-12, 12, value=0, step=1, label="Transpose (Semitones)"), - gr.Checkbox(False, label="Auto Predict F0"), - gr.Slider(0.0, 1.0, value=default_cluster_infer_ratio, step=0.1, label="cluster infer ratio"), - gr.Slider(0.0, 1.0, value=0.4, step=0.1, label="noise scale"), - gr.Dropdown( - choices=["crepe", "crepe-tiny", "parselmouth", "dio", "harvest"], - value=default_f0_method, - label="f0 method", - ), - ], - outputs=["audio", "audio"], - title="Voice Cloning", - description=description, - article=article, - examples=[ - ["COz9lDCFHjw", 75, 90, speakers[0], 0, False, default_cluster_infer_ratio, 0.4, default_f0_method], - ["dQw4w9WgXcQ", 21, 35, speakers[0], 0, False, default_cluster_infer_ratio, 0.4, default_f0_method], - ["Wvm5GuDfAas", 15, 30, speakers[0], 0, False, default_cluster_infer_ratio, 0.4, default_f0_method], - ], -) -interface = gr.TabbedInterface( - [interface_mic, interface_file, interface_yt], - ["Clone From Mic", "Clone From File", "Clone Song From YouTube"], -) - - -if __name__ == "__main__": - interface.launch() diff --git a/spaces/mfrashad/ClothingGAN/netdissect/easydict.py b/spaces/mfrashad/ClothingGAN/netdissect/easydict.py deleted file mode 100644 index 0188f524b87eef75c175772ff262b93b47919ba7..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/ClothingGAN/netdissect/easydict.py +++ /dev/null @@ -1,126 +0,0 @@ -''' -From https://github.com/makinacorpus/easydict. -''' - -class EasyDict(dict): - """ - Get attributes - - >>> d = EasyDict({'foo':3}) - >>> d['foo'] - 3 - >>> d.foo - 3 - >>> d.bar - Traceback (most recent call last): - ... 
- AttributeError: 'EasyDict' object has no attribute 'bar' - - Works recursively - - >>> d = EasyDict({'foo':3, 'bar':{'x':1, 'y':2}}) - >>> isinstance(d.bar, dict) - True - >>> d.bar.x - 1 - - Bullet-proof - - >>> EasyDict({}) - {} - >>> EasyDict(d={}) - {} - >>> EasyDict(None) - {} - >>> d = {'a': 1} - >>> EasyDict(**d) - {'a': 1} - - Set attributes - - >>> d = EasyDict() - >>> d.foo = 3 - >>> d.foo - 3 - >>> d.bar = {'prop': 'value'} - >>> d.bar.prop - 'value' - >>> d - {'foo': 3, 'bar': {'prop': 'value'}} - >>> d.bar.prop = 'newer' - >>> d.bar.prop - 'newer' - - - Values extraction - - >>> d = EasyDict({'foo':0, 'bar':[{'x':1, 'y':2}, {'x':3, 'y':4}]}) - >>> isinstance(d.bar, list) - True - >>> from operator import attrgetter - >>> map(attrgetter('x'), d.bar) - [1, 3] - >>> map(attrgetter('y'), d.bar) - [2, 4] - >>> d = EasyDict() - >>> d.keys() - [] - >>> d = EasyDict(foo=3, bar=dict(x=1, y=2)) - >>> d.foo - 3 - >>> d.bar.x - 1 - - Still like a dict though - - >>> o = EasyDict({'clean':True}) - >>> o.items() - [('clean', True)] - - And like a class - - >>> class Flower(EasyDict): - ... power = 1 - ... - >>> f = Flower() - >>> f.power - 1 - >>> f = Flower({'height': 12}) - >>> f.height - 12 - >>> f['power'] - 1 - >>> sorted(f.keys()) - ['height', 'power'] - """ - def __init__(self, d=None, **kwargs): - if d is None: - d = {} - if kwargs: - d.update(**kwargs) - for k, v in d.items(): - setattr(self, k, v) - # Class attributes - for k in self.__class__.__dict__.keys(): - if not (k.startswith('__') and k.endswith('__')): - setattr(self, k, getattr(self, k)) - - def __setattr__(self, name, value): - if isinstance(value, (list, tuple)): - value = [self.__class__(x) - if isinstance(x, dict) else x for x in value] - elif isinstance(value, dict) and not isinstance(value, self.__class__): - value = self.__class__(value) - super(EasyDict, self).__setattr__(name, value) - super(EasyDict, self).__setitem__(name, value) - - __setitem__ = __setattr__ - -def load_json(filename): - import json - with open(filename) as f: - return EasyDict(json.load(f)) - -if __name__ == "__main__": - import doctest - doctest.testmod() diff --git a/spaces/mfrashad/ClothingGAN/netdissect/nethook.py b/spaces/mfrashad/ClothingGAN/netdissect/nethook.py deleted file mode 100644 index f36e84ee0cae2de2c3be247498408cf66db3ee8f..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/ClothingGAN/netdissect/nethook.py +++ /dev/null @@ -1,266 +0,0 @@ -''' -Utilities for instrumenting a torch model. - -InstrumentedModel will wrap a pytorch model and allow hooking -arbitrary layers to monitor or modify their output directly. - -Modified by Erik Härkönen: -- 29.11.2019: Unhooking bugfix -- 25.01.2020: Offset edits, removed old API -''' - -import torch, numpy, types -from collections import OrderedDict - -class InstrumentedModel(torch.nn.Module): - ''' - A wrapper for hooking, probing and intervening in pytorch Modules. 
- Example usage: - - ``` - model = load_my_model() - with inst as InstrumentedModel(model): - inst.retain_layer(layername) - inst.edit_layer(layername, 0.5, target_features) - inst.edit_layer(layername, offset=offset_tensor) - inst(inputs) - original_features = inst.retained_layer(layername) - ``` - ''' - - def __init__(self, model): - super(InstrumentedModel, self).__init__() - self.model = model - self._retained = OrderedDict() - self._ablation = {} - self._replacement = {} - self._offset = {} - self._hooked_layer = {} - self._old_forward = {} - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def forward(self, *inputs, **kwargs): - return self.model(*inputs, **kwargs) - - def retain_layer(self, layername): - ''' - Pass a fully-qualified layer name (E.g., module.submodule.conv3) - to hook that layer and retain its output each time the model is run. - A pair (layername, aka) can be provided, and the aka will be used - as the key for the retained value instead of the layername. - ''' - self.retain_layers([layername]) - - def retain_layers(self, layernames): - ''' - Retains a list of a layers at once. - ''' - self.add_hooks(layernames) - for layername in layernames: - aka = layername - if not isinstance(aka, str): - layername, aka = layername - if aka not in self._retained: - self._retained[aka] = None - - def retained_features(self): - ''' - Returns a dict of all currently retained features. - ''' - return OrderedDict(self._retained) - - def retained_layer(self, aka=None, clear=False): - ''' - Retrieve retained data that was previously hooked by retain_layer. - Call this after the model is run. If clear is set, then the - retained value will return and also cleared. - ''' - if aka is None: - # Default to the first retained layer. - aka = next(self._retained.keys().__iter__()) - result = self._retained[aka] - if clear: - self._retained[aka] = None - return result - - def edit_layer(self, layername, ablation=None, replacement=None, offset=None): - ''' - Pass a fully-qualified layer name (E.g., module.submodule.conv3) - to hook that layer and modify its output each time the model is run. - The output of the layer will be modified to be a convex combination - of the replacement and x interpolated according to the ablation, i.e.: - `output = x * (1 - a) + (r * a)`. - Additionally or independently, an offset can be added to the output. - ''' - if not isinstance(layername, str): - layername, aka = layername - else: - aka = layername - - # The default ablation if a replacement is specified is 1.0. - if ablation is None and replacement is not None: - ablation = 1.0 - self.add_hooks([(layername, aka)]) - if ablation is not None: - self._ablation[aka] = ablation - if replacement is not None: - self._replacement[aka] = replacement - if offset is not None: - self._offset[aka] = offset - # If needed, could add an arbitrary postprocessing lambda here. - - def remove_edits(self, layername=None, remove_offset=True, remove_replacement=True): - ''' - Removes edits at the specified layer, or removes edits at all layers - if no layer name is specified. 
- ''' - if layername is None: - if remove_replacement: - self._ablation.clear() - self._replacement.clear() - if remove_offset: - self._offset.clear() - return - - if not isinstance(layername, str): - layername, aka = layername - else: - aka = layername - if remove_replacement and aka in self._ablation: - del self._ablation[aka] - if remove_replacement and aka in self._replacement: - del self._replacement[aka] - if remove_offset and aka in self._offset: - del self._offset[aka] - - def add_hooks(self, layernames): - ''' - Sets up a set of layers to be hooked. - - Usually not called directly: use edit_layer or retain_layer instead. - ''' - needed = set() - aka_map = {} - for name in layernames: - aka = name - if not isinstance(aka, str): - name, aka = name - if self._hooked_layer.get(aka, None) != name: - aka_map[name] = aka - needed.add(name) - if not needed: - return - for name, layer in self.model.named_modules(): - if name in aka_map: - needed.remove(name) - aka = aka_map[name] - self._hook_layer(layer, name, aka) - for name in needed: - raise ValueError('Layer %s not found in model' % name) - - def _hook_layer(self, layer, layername, aka): - ''' - Internal method to replace a forward method with a closure that - intercepts the call, and tracks the hook so that it can be reverted. - ''' - if aka in self._hooked_layer: - raise ValueError('Layer %s already hooked' % aka) - if layername in self._old_forward: - raise ValueError('Layer %s already hooked' % layername) - self._hooked_layer[aka] = layername - self._old_forward[layername] = (layer, aka, - layer.__dict__.get('forward', None)) - editor = self - original_forward = layer.forward - def new_forward(self, *inputs, **kwargs): - original_x = original_forward(*inputs, **kwargs) - x = editor._postprocess_forward(original_x, aka) - return x - layer.forward = types.MethodType(new_forward, layer) - - def _unhook_layer(self, aka): - ''' - Internal method to remove a hook, restoring the original forward method. - ''' - if aka not in self._hooked_layer: - return - layername = self._hooked_layer[aka] - layer, check, old_forward = self._old_forward[layername] - assert check == aka - if old_forward is None: - if 'forward' in layer.__dict__: - del layer.__dict__['forward'] - else: - layer.forward = old_forward - del self._old_forward[layername] - del self._hooked_layer[aka] - if aka in self._ablation: - del self._ablation[aka] - if aka in self._replacement: - del self._replacement[aka] - if aka in self._offset: - del self._offset[aka] - if aka in self._retained: - del self._retained[aka] - - def _postprocess_forward(self, x, aka): - ''' - The internal method called by the hooked layers after they are run. - ''' - # Retain output before edits, if desired. - if aka in self._retained: - self._retained[aka] = x.detach() - - # Apply replacement edit - a = make_matching_tensor(self._ablation, aka, x) - if a is not None: - x = x * (1 - a) - v = make_matching_tensor(self._replacement, aka, x) - if v is not None: - x += (v * a) - - # Apply offset edit - b = make_matching_tensor(self._offset, aka, x) - if b is not None: - x = x + b - - return x - - def close(self): - ''' - Unhooks all hooked layers in the model. - ''' - for aka in list(self._old_forward.keys()): - self._unhook_layer(aka) - assert len(self._old_forward) == 0 - - -def make_matching_tensor(valuedict, name, data): - ''' - Converts `valuedict[name]` to be a tensor with the same dtype, device, - and dimension count as `data`, and caches the converted tensor. 
- ''' - v = valuedict.get(name, None) - if v is None: - return None - if not isinstance(v, torch.Tensor): - # Accept non-torch data. - v = torch.from_numpy(numpy.array(v)) - valuedict[name] = v - if not v.device == data.device or not v.dtype == data.dtype: - # Ensure device and type matches. - assert not v.requires_grad, '%s wrong device or type' % (name) - v = v.to(device=data.device, dtype=data.dtype) - valuedict[name] = v - if len(v.shape) < len(data.shape): - # Ensure dimensions are unsqueezed as needed. - assert not v.requires_grad, '%s wrong dimensions' % (name) - v = v.view((1,) + tuple(v.shape) + - (1,) * (len(data.shape) - len(v.shape) - 1)) - valuedict[name] = v - return v diff --git a/spaces/mfrashad/ClothingGAN/netdissect/upsegmodel/prroi_pool/build.py b/spaces/mfrashad/ClothingGAN/netdissect/upsegmodel/prroi_pool/build.py deleted file mode 100644 index b198790817a2d11d65d6211b011f9408d9d34270..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/ClothingGAN/netdissect/upsegmodel/prroi_pool/build.py +++ /dev/null @@ -1,50 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# File : build.py -# Author : Jiayuan Mao, Tete Xiao -# Email : maojiayuan@gmail.com, jasonhsiao97@gmail.com -# Date : 07/13/2018 -# -# This file is part of PreciseRoIPooling. -# Distributed under terms of the MIT license. -# Copyright (c) 2017 Megvii Technology Limited. - -import os -import torch - -from torch.utils.ffi import create_extension - -headers = [] -sources = [] -defines = [] -extra_objects = [] -with_cuda = False - -if torch.cuda.is_available(): - with_cuda = True - - headers+= ['src/prroi_pooling_gpu.h'] - sources += ['src/prroi_pooling_gpu.c'] - defines += [('WITH_CUDA', None)] - - this_file = os.path.dirname(os.path.realpath(__file__)) - extra_objects_cuda = ['src/prroi_pooling_gpu_impl.cu.o'] - extra_objects_cuda = [os.path.join(this_file, fname) for fname in extra_objects_cuda] - extra_objects.extend(extra_objects_cuda) -else: - # TODO(Jiayuan Mao @ 07/13): remove this restriction after we support the cpu implementation. 
- raise NotImplementedError('Precise RoI Pooling only supports GPU (cuda) implememtations.') - -ffi = create_extension( - '_prroi_pooling', - headers=headers, - sources=sources, - define_macros=defines, - relative_to=__file__, - with_cuda=with_cuda, - extra_objects=extra_objects -) - -if __name__ == '__main__': - ffi.build() - diff --git a/spaces/mgolu/EDvai_final/app.py b/spaces/mgolu/EDvai_final/app.py deleted file mode 100644 index 6c0a7dc7fd65e8d5127f7cb42a2d34178cb5e2d8..0000000000000000000000000000000000000000 --- a/spaces/mgolu/EDvai_final/app.py +++ /dev/null @@ -1,204 +0,0 @@ -import gradio as gr -import pandas as pd -import pickle - -import os - -# Define params names -PARAMS_NAME = [ - "orderAmount", - "orderState", - "paymentMethodRegistrationFailure", - "paymentMethodType", - "paymentMethodProvider", - "paymentMethodIssuer", - "transactionAmount", - "transactionFailed", - "emailDomain", - "emailProvider", - "customerIPAddressSimplified", - "sameCity" -] - -# Load files -MAIN_FOLDER = os.path.dirname(__file__) - -MODEL_PATH = os.path.join(MAIN_FOLDER, "model/modelo_proyecto_final.pkl") -with open(MODEL_PATH, "rb") as f: - model = pickle.load(f) - -COLUMNS_PATH = "model/categories_ohe_without_fraudulent.pickle" -with open(COLUMNS_PATH, 'rb') as handle: - ohe_tr = pickle.load(handle) - -BINS_ORDER = os.path.join(MAIN_FOLDER, "model/saved_bins_order.pickle") -with open(BINS_ORDER, 'rb') as handle: - new_saved_bins_order = pickle.load(handle) - -BINS_TRANSACTION = os.path.join(MAIN_FOLDER, "model/saved_bins_transaction.pickle") -with open(BINS_TRANSACTION, 'rb') as handle: - new_saved_bins_transaction = pickle.load(handle) - - -def predict(*args): - answer_dict = {} - - for i in range(len(PARAMS_NAME)): - answer_dict[PARAMS_NAME[i]] = [args[i]] - - # Crear dataframe - single_instance = pd.DataFrame.from_dict(answer_dict) - - # Manejar puntos de corte o bins - single_instance["orderAmount"] = single_instance["orderAmount"].astype(float) - single_instance["orderAmount"] = pd.cut(single_instance['orderAmount'], - bins=new_saved_bins_order, - include_lowest=True) - - single_instance["transactionAmount"] = single_instance["transactionAmount"].astype(int) - single_instance["transactionAmount"] = pd.cut(single_instance['transactionAmount'], - bins=new_saved_bins_order, - include_lowest=True) - - # One hot encoding - single_instance_ohe = pd.get_dummies(single_instance).reindex(columns = ohe_tr).fillna(0) - - prediction = model.predict(single_instance_ohe) - - # Cast numpy.int64 to just a int - type_of_fraud = int(prediction[0]) - - # Adaptación respuesta - response = "Error parsing value" - if type_of_fraud == 0: - response = "False" - if type_of_fraud == 1: - response = "True" - if type_of_fraud == 2: - response = "Warning" - - return response - - -with gr.Blocks() as demo: - gr.Markdown( - """ - # Prevención de Fraude 🕵️♀️🕵️ - """ - ) - - with gr.Row(): - with gr.Column(): - - gr.Markdown( - """ - ## Predecir si un cliente es fraudulento o no. 
- """ - ) - - orderAmount = gr.Slider(label="Order amount", minimum=10, maximum=55, step=5, randomize=True) - - orderState = gr.Radio( - label="Order state", - choices=["failed", "fulfilled", "pending"], - value="failed" - ) - - paymentMethodRegistrationFailure = gr.Radio( - label="Payment method registration failure", - choices=["True", "False"], - value="True" - ) - - paymentMethodType = gr.Radio( - label="Payment Method Type", - choices=["apple pay", "bitcoin", "card", "paypal"], - value="bitcoin" - ) - - paymentMethodProvider = gr.Dropdown( - label="Payment method provider", - choices=["American Express", "Diners Club / Carte Blanche", "Discover", "JCB 15 digit", "JCB 16 digit", "Maestro", "Mastercard", "VISA 13 digit", "VISA 16 digit", "Voyager"], - multiselect=False, - value="American Express", - ) - - paymentMethodIssuer = gr.Dropdown( - label="Payment method issuer", - choices=["Bastion Banks", "Bulwark Trust Corp.", "Citizens First Banks", "Fountain Financial Inc.", "Grand Credit Corporation", "Her Majesty Trust", "His Majesty Bank Corp.", "Rose Bancshares", "Solace Banks", "Vertex Bancorp", "weird"], - multiselect=False, - value="Bastion Banks", - ) - - transactionAmount = gr.Slider(label="Transaction amount", minimum=10, maximum=55, step=1, randomize=True) - - transactionFailed = gr.Radio( - label="Transaction failed", - choices=["True", "False"], - value="False" - ) - - emailDomain = gr.Radio( - label="Email domain", - choices=["biz", "com", "info", "net", "org", "weird"], - value="com" - ) - - emailProvider = gr.Radio( - label="Email provider", - choices=["gmail", "hotmail", "yahoo", "weird", "other"], - value="gmail" - ) - - customerIPAddressSimplified = gr.Radio( - label="Customer IP Address", - choices=["digits_and_letters", "only_digits"], - value="only_letters" - ) - - sameCity = gr.Radio( - label="Same city", - choices=["no", "yes", "unknown"], - value="unknown" - ) - - with gr.Column(): - - gr.Markdown( - """ - ## Predicción - """ - ) - - label = gr.Label(label="Tipo de fraude") - predict_btn = gr.Button(value="Evaluar") - predict_btn.click( - predict, - inputs=[ - orderAmount, - orderState, - paymentMethodRegistrationFailure, - paymentMethodType, - paymentMethodProvider, - paymentMethodIssuer, - transactionAmount, - transactionFailed, - emailDomain, - emailProvider, - customerIPAddressSimplified, - sameCity, - ], - outputs=[label], - api_name="prediccion" - ) - gr.Markdown( - """ -- Proyecto demo creado en el bootcamp de EDVAI 🤗 - -
- """ - ) - -demo.launch(share=False) diff --git a/spaces/mikeee/radiobee-aligner/start-radiobee.bat b/spaces/mikeee/radiobee-aligner/start-radiobee.bat deleted file mode 100644 index e1f2d7cd1ca0b113afb8529e96f480e195c1e457..0000000000000000000000000000000000000000 --- a/spaces/mikeee/radiobee-aligner/start-radiobee.bat +++ /dev/null @@ -1 +0,0 @@ -start "radiobee" run-radiobee \ No newline at end of file diff --git a/spaces/miku-hutao/vits-uma-genshin-honkai/app.py b/spaces/miku-hutao/vits-uma-genshin-honkai/app.py deleted file mode 100644 index ba29f6a5aff153461017c2e11e03a8765581c0d5..0000000000000000000000000000000000000000 --- a/spaces/miku-hutao/vits-uma-genshin-honkai/app.py +++ /dev/null @@ -1,150 +0,0 @@ -# coding=utf-8 -import time -import os -import gradio as gr -import utils -import argparse -import commons -from models import SynthesizerTrn -from text import text_to_sequence -import torch -from torch import no_grad, LongTensor -import webbrowser -import logging -import gradio.processing_utils as gr_processing_utils -logging.getLogger('numba').setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit text and audio length in huggingface spaces - -audio_postprocess_ori = gr.Audio.postprocess -def audio_postprocess(self, y): - data = audio_postprocess_ori(self, y) - if data is None: - return None - return gr_processing_utils.encode_url_or_file_to_base64(data["name"]) -gr.Audio.postprocess = audio_postprocess - -def get_text(text, hps): - text_norm, clean_text = text_to_sequence(text, hps.symbols, hps.data.text_cleaners) - if hps.data.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = LongTensor(text_norm) - return text_norm, clean_text - -def vits(text, language, speaker_id, noise_scale, noise_scale_w, length_scale): - start = time.perf_counter() - if not len(text): - return "输入文本不能为空!", None, None - text = text.replace('\n', ' ').replace('\r', '').replace(" ", "") - if len(text) > 100 and limitation: - return f"输入文字过长!{len(text)}>100", None, None - if language == 0: - text = f"[ZH]{text}[ZH]" - elif language == 1: - text = f"[JA]{text}[JA]" - else: - text = f"{text}" - stn_tst, clean_text = get_text(text, hps_ms) - with no_grad(): - x_tst = stn_tst.unsqueeze(0).to(device) - x_tst_lengths = LongTensor([stn_tst.size(0)]).to(device) - speaker_id = LongTensor([speaker_id]).to(device) - audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=speaker_id, noise_scale=noise_scale, noise_scale_w=noise_scale_w, - length_scale=length_scale)[0][0, 0].data.cpu().float().numpy() - - return "生成成功!", (22050, audio), f"生成耗时 {round(time.perf_counter()-start, 2)} s" - -def search_speaker(search_value): - for s in speakers: - if search_value == s: - return s - for s in speakers: - if search_value in s: - return s - -def change_lang(language): - if language == 0: - return 0.6, 0.668, 1.2 - else: - return 0.6, 0.668, 1.1 - -download_audio_js = """ -() =>{{ - let root = document.querySelector("body > gradio-app"); - if (root.shadowRoot != null) - root = root.shadowRoot; - let audio = root.querySelector("#tts-audio").querySelector("audio"); - let text = root.querySelector("#input-text").querySelector("textarea"); - if (audio == undefined) - return; - text = text.value; - if (text == undefined) - text = Math.floor(Math.random()*100000000); - audio = audio.src; - let oA = document.createElement("a"); - oA.download = text.substr(0, 20)+'.wav'; - oA.href = audio; - document.body.appendChild(oA); - oA.click(); - oA.remove(); -}} -""" - -if __name__ == '__main__': - 
parser = argparse.ArgumentParser() - parser.add_argument('--device', type=str, default='cpu') - parser.add_argument('--api', action="store_true", default=False) - parser.add_argument("--share", action="store_true", default=False, help="share gradio app") - parser.add_argument("--colab", action="store_true", default=False, help="share gradio app") - args = parser.parse_args() - device = torch.device(args.device) - - hps_ms = utils.get_hparams_from_file(r'./model/config.json') - net_g_ms = SynthesizerTrn( - len(hps_ms.symbols), - hps_ms.data.filter_length // 2 + 1, - hps_ms.train.segment_size // hps_ms.data.hop_length, - n_speakers=hps_ms.data.n_speakers, - **hps_ms.model) - _ = net_g_ms.eval().to(device) - speakers = hps_ms.speakers - model, optimizer, learning_rate, epochs = utils.load_checkpoint(r'./model/G_953000.pth', net_g_ms, None) - - with gr.Blocks() as app: - gr.Markdown( - "#-
- > Welcome to the Machine Learning Simulator, based in Universal Paperclips game | -