diff --git a/spaces/123Kumar/vits-uma-genshin-honkai123/transforms.py b/spaces/123Kumar/vits-uma-genshin-honkai123/transforms.py
deleted file mode 100644
index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000
--- a/spaces/123Kumar/vits-uma-genshin-honkai123/transforms.py
+++ /dev/null
@@ -1,193 +0,0 @@
-import torch
-from torch.nn import functional as F
-
-import numpy as np
-
-
-DEFAULT_MIN_BIN_WIDTH = 1e-3
-DEFAULT_MIN_BIN_HEIGHT = 1e-3
-DEFAULT_MIN_DERIVATIVE = 1e-3
-
-
-def piecewise_rational_quadratic_transform(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
-
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {
- 'tails': tails,
- 'tail_bound': tail_bound
- }
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
-def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(
- inputs[..., None] >= bin_locations,
- dim=-1
- ) - 1
-
-
-def unconstrained_rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails='linear',
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == 'linear':
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError('{} tails are not implemented.'.format(tails))
-
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative
- )
-
- return outputs, logabsdet
-
-def rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0., right=1., bottom=0., top=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError('Input to a transform is not within its domain')
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError('Minimal bin width too large for the number of bins')
- if min_bin_height * num_bins > 1.0:
- raise ValueError('Minimal bin height too large for the number of bins')
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (((inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta)
- + input_heights * (input_delta - input_derivatives)))
- b = (input_heights * input_derivatives
- - (inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta))
- c = - input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (input_delta * theta.pow(2)
- + input_derivatives * theta_one_minus_theta)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
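A minimal usage sketch of the `piecewise_rational_quadratic_transform` defined in the deleted file above, assuming the module is importable as `transforms`; the batch size, bin count, and `tail_bound` value are illustrative choices, not values taken from this repository:

```python
# Sketch only: drives the spline transform shown above with made-up shapes.
# With tails="linear", K bins take K widths, K heights and K - 1 derivatives.
import torch

from transforms import piecewise_rational_quadratic_transform  # module shown above

batch, num_bins = 4, 10
inputs = torch.randn(batch)                 # unconstrained inputs (linear tails)
widths = torch.randn(batch, num_bins)       # unnormalized bin widths
heights = torch.randn(batch, num_bins)      # unnormalized bin heights
derivs = torch.randn(batch, num_bins - 1)   # unnormalized knot derivatives

outputs, logabsdet = piecewise_rational_quadratic_transform(
    inputs, widths, heights, derivs,
    inverse=False, tails="linear", tail_bound=5.0,
)
print(outputs.shape, logabsdet.shape)       # torch.Size([4]) torch.Size([4])
```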
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Fix Generator V.2.0 Samsungl.md b/spaces/1gistliPinn/ChatGPT4/Examples/Fix Generator V.2.0 Samsungl.md
deleted file mode 100644
index df2e3883fbe3cc77519584a058d594136c5e7d55..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Fix Generator V.2.0 Samsungl.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-Finally, since some people don’t like documentation and don’t want to read, we allow you to instantly see the output of our generators by downloading this draft and running the python script in the top-left corner of your browser. An article about our work can be found here:
-Fix Generator V.2.0 Samsungl
-DOWNLOAD 🆓 https://imgfil.com/2uxZJw
-Solar generators are the most expensive option, costing seven times as much as a standard fuel-powered generator. Price isn't the only issue. With fuel-powered generators, the output is consistent and guaranteed. However, solar generators require sunlight and can be affected by things like cloud cover, placement location, and the length of the day, so they are nowhere near as reliable as their fossil fuel counterparts. Solar generators do store power in a power bank, which manufacturers hope will get you through any cloudy patches. But the power bank won't charge when you are operating at capacity.
-A conventional generator's main benefit over the other types listed in this article is power output. While there is a whole range of conventional generators, they usually have an output of at least 4,000 watts and up to around 12,000 watts. While that's overkill if you want to hook up a sound system for a family BBQ, it's ideal if you're going to power multiple large appliances during a power outage. They are also cheaper than inverter or solar generators.
-The traditional list of uses of generators is often long. Powering something that needs power when the sun doesn't shine or when the power grid is down is the most common. A generator provides ongoing and predictable power during a power outage. A generator provides power for things such as running a home lighting system at night. It can provide power for lights when batteries run out or for power tools when AC power isn't available. It can provide power to water pumps and pump stations during a power failure. It can charge a cell phone or other electronic devices when the grid is down and power isn't being supplied by the grid. A generator can power a lantern during a storm.
- 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/README.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/README.md
deleted file mode 100644
index b5434136c4802fec5a20b22359473b60ddc6c434..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: ChatGPT Prompt Generator
-emoji: 👨🏻🎤
-colorFrom: purple
-colorTo: pink
-sdk: gradio
-sdk_version: 3.16.2
-app_file: app.py
-pinned: false
-license: apache-2.0
-duplicated_from: umair007/ChatGPT-prompt-generator
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dummynation Mod APK with Unlimited Troops and No Ads.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dummynation Mod APK with Unlimited Troops and No Ads.md
deleted file mode 100644
index e136697066c58368e78566e1ba7c1588fc5a3c2c..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Dummynation Mod APK with Unlimited Troops and No Ads.md
+++ /dev/null
@@ -1,106 +0,0 @@
-
-Dummynation Mod APK Unlimited Troops: How to Conquer the World with Ease
-Do you love strategy games where you can control a country and lead it to world domination? If so, you might want to check out Dummynation, a game that gives you unlimited power over a country with a single promise to fulfill: world domination. But how do you manage to achieve it? That's up to you. You can expand your territory by military occupation, analyze and manipulate diplomatic relations, use your country's resources to sustain your research and military campaigns, and determine your country's economic policy. Sounds exciting, right? But what if we tell you that there is a way to make it even more exciting? That's right, we are talking about Dummynation Mod APK Unlimited Troops, a modded version of the game that removes ads and adds new features, such as unlimited troops. In this article, we will tell you everything you need to know about this mod, how to download and install it, how to play it, and some tips and tricks to help you conquer the world with ease.
-dummynation mod apk unlimited troops
-Download Zip ☆☆☆ https://urlin.us/2uT1WO
-What is Dummynation?
-Dummynation is a strategy game where you have unlimited power over a country, with a single promise to fulfill: world domination. How you manage to achieve it is up to you.
-A strategy game where you have unlimited power over a country
-In Dummynation, you can choose any country in the world to start with, and customize your leader's name and appearance. You can then use the map to select a target country and send your troops to occupy it. You can also monitor your power, relations, resources and economy on the dashboard, and use research and policy options to improve your country's performance and influence.
-The goal is to achieve world domination by expanding your territory, manipulating diplomacy, managing resources and economy
-The ultimate goal of Dummynation is to achieve world domination by any means necessary. You can expand your territory by invading other countries with your troops, or by forming alliances and treaties with them. You can also manipulate diplomatic relations by using propaganda, espionage, sabotage, or bribery. You can manage your resources by allocating them to different sectors, such as military, research, or economy. You can also determine your economic policy by setting taxes, tariffs, subsidies, or trade agreements. The game offers a lot of freedom and flexibility in how you want to play and achieve your goal.
-What is Dummynation Mod APK Unlimited Troops?
-Dummynation Mod APK Unlimited Troops is a modded version of the game that removes ads and adds new features, such as unlimited troops. The main feature is unlimited troops, which allows you to invade any country without worrying about casualties or costs.
-dummynation mod apk no ads
-dummynation mod apk latest version
-dummynation mod apk free download
-dummynation mod apk unlimited gems
-dummynation mod apk android
-dummynation mod apk happymod
-dummynation mod apk world domination
-dummynation mod apk strategy game
-dummynation mod apk unlimited power
-dummynation mod apk military occupation
-dummynation mod apk diplomatic relations
-dummynation mod apk resource management
-dummynation mod apk economic policy
-dummynation mod apk new weapons
-dummynation mod apk updated graphics
-dummynation mod apk new levels
-dummynation mod apk easy install
-dummynation mod apk compatible devices
-dummynation mod apk anti-ban mechanism
-dummynation mod apk unlock characters
-dummynation mod apk offline mode
-dummynation mod apk multiplayer mode
-dummynation mod apk custom country
-dummynation mod apk realistic simulation
-dummynation mod apk historical scenarios
-dummynation mod apk random events
-dummynation mod apk achievements and leaderboards
-dummynation mod apk tips and tricks
-dummynation mod apk cheats and hacks
-dummynation mod apk reviews and ratings
-how to download dummynation mod apk unlimited troops
-how to play dummynation mod apk unlimited troops
-how to update dummynation mod apk unlimited troops
-how to uninstall dummynation mod apk unlimited troops
-how to backup and restore dummynation mod apk unlimited troops
-how to fix errors in dummynation mod apk unlimited troops
-how to contact developer of dummynation mod apk unlimited troops
-how to support developer of dummynation mod apk unlimited troops
-best alternatives to dummynation mod apk unlimited troops
-best strategies for dummynation mod apk unlimited troops
-best countries to play in dummynation mod apk unlimited troops
-best weapons to use in dummynation mod apk unlimited troops
-best allies and enemies in dummynation mod apk unlimited troops
-best resources to invest in dummynation mod apk unlimited troops
-best economic policies to adopt in dummynation mod apk unlimited troops
-best ways to achieve world domination in dummynation mod apk unlimited troops
-best ways to avoid war in dummynation mod apk unlimited troops
-best ways to win war in dummynation mod apk unlimited troops
-best ways to have fun in dummynation mod apk unlimited troops
-A modded version of the game that removes ads and adds new features
-Dummynation Mod APK Unlimited Troops is a modified version of the original game that removes annoying ads and adds new features that enhance the gameplay. The modded version is not available on the official app store, but you can download it from a reliable source online. The modded version does not require root access or any special permissions to install and run.
-The main feature is unlimited troops, which allows you to invade any country without worrying about casualties or costs
-The main feature of Dummynation Mod APK Unlimited Troops is unlimited troops, which means you can send as many troops as you want to any country you want to invade. You don't have to worry about losing troops or spending money on them. You can also use different types of troops, such as infantry, tanks, planes, ships, or missiles. This feature gives you a huge advantage over your enemies and makes it easier to conquer the world.
-How to download and install Dummynation Mod APK Unlimited Troops?
-Downloading and installing Dummynation Mod APK Unlimited Troops is easy and simple. Just follow these steps:
-Download the modded APK file from a reliable source
-The first step is to download the modded APK file from a reliable source online. You can search for Dummynation Mod APK Unlimited Troops on Google or any other search engine and find a link that offers a safe and secure download. Make sure you download the latest version of the mod that is compatible with your device.
-Enable unknown sources on your device settings
-The next step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the official app store. To do this, go to your device settings and look for security or privacy options. Then find the option that says unknown sources or allow installation from unknown sources and turn it on.
-Install the APK file and launch the game
-The final step is to install the APK file and launch the game. To do this, locate the downloaded APK file on your device storage and tap on it. Follow the instructions on the screen to complete the installation process. Once done, you can launch the game from your app drawer or home screen and enjoy playing Dummynation Mod APK Unlimited Troops.
-How to play Dummynation Mod APK Unlimited Troops?
-Playing Dummynation Mod APK Unlimited Troops is fun and easy. Just follow these steps:
-Choose a country to start with and customize your leader's name and appearance
-The first step is to choose a country to start with and customize your leader's name and appearance. You can choose any country in the world, from the USA to China, from Russia to Brazil, from India to Australia. You can also change your leader's name, gender, hair, skin, and clothes. You can make your leader look like yourself, a famous person, or a fictional character. The choice is yours.
-Use the map to select a target country and send your troops to occupy it
-The next step is to use the map to select a target country and send your troops to occupy it. You can zoom in and out of the map and see the details of each country, such as its name, flag, population, power, relations, resources, and economy. You can also see the color of each country, which indicates its status: green for allies, red for enemies, yellow for neutral, and blue for yourself. To select a target country, simply tap on it and see its information on the bottom of the screen. To send your troops to occupy it, tap on the attack button and choose the type and number of troops you want to send. You can use unlimited troops, so don't be afraid to send as many as you want.
-Monitor your power, relations, resources and economy on the dashboard
-The third step is to monitor your power, relations, resources and economy on the dashboard. The dashboard is located on the top of the screen and shows you important information about your country and the world. You can see your power level, which indicates how strong you are compared to other countries. You can also see your relations with other countries, which indicates how friendly or hostile they are towards you. You can also see your resources, which include food, water, oil, metal, uranium, and money. You can use your resources to sustain your research and military campaigns. You can also see your economy, which includes your income and expenses. You can use your economy to determine your tax rate, trade agreements, subsidies, and tariffs.
-Use research and policy options to improve your country's performance and influence
-The fourth step is to use research and policy options to improve your country's performance and influence. You can access these options by tapping on the menu button on the top right corner of the screen. You can then choose between research or policy options. Research options allow you to unlock new technologies that can improve your military, economy, or diplomacy. For example, you can research nuclear weapons that can destroy entire countries in one strike. Policy options allow you to set your country's stance on various issues that can affect your relations with other countries. For example, you can set your policy on human rights that can make you more popular or unpopular among other countries.
-Tips and tricks for Dummynation Mod APK Unlimited Troops
-Playing Dummynation Mod APK Unlimited Troops can be a lot of fun, but also challenging. Here are some tips and tricks to help you conquer the world with ease:
-Balance your expansion and diplomacy to avoid creating too many enemies
-While it may be tempting to use your unlimited troops to invade every country you see, you should also consider the consequences of your actions. If you create too many enemies, you may face a coalition of countries that will try to stop you. You may also lose the support of your allies, who may turn against you or abandon you. Therefore, you should balance your expansion and diplomacy to avoid creating too many enemies. You can do this by forming alliances with other countries, respecting their sovereignty, honoring your treaties, and avoiding unnecessary conflicts. You can also use diplomacy to persuade or intimidate other countries to join you or surrender to you.
-Use your unlimited troops wisely and strategically to overcome stronger opponents
-Even though you have unlimited troops, you should still use them wisely and strategically to overcome stronger opponents. You should not just send your troops blindly to any country, but rather plan your attacks carefully and choose the best type and number of troops for each situation. You should also consider the terrain, weather, distance, and defense of each country before attacking them. You should also use different types of troops, such as infantry, tanks, planes, ships, or missiles, to exploit the weaknesses of your enemies and gain an advantage over them.
-Invest in research and economy to gain an edge over your rivals
-Besides using your unlimited troops, you should also invest in research and economy to gain an edge over your rivals. Research can help you unlock new technologies that can improve your military, economy, or diplomacy. For example, you can research nuclear weapons that can destroy entire countries in one strike, or stealth technology that can make your troops invisible to radar. Economy can help you increase your income and reduce your expenses. For example, you can increase your tax rate, trade agreements, subsidies, or tariffs to boost your revenue, or reduce your military spending, welfare spending, or debt payments to lower your costs.
-Explore new levels and areas to discover new challenges and rewards
-Dummynation Mod APK Unlimited Troops offers a lot of variety and replay value by providing different levels and areas to explore. Each level has a different difficulty and objective, such as conquering a continent, a region, or the whole world. Each area has a different theme and design, such as Europe, Asia, Africa, America, or Antarctica. By exploring new levels and areas, you can discover new challenges and rewards that will keep you entertained and motivated.
-Conclusion
-Dummynation Mod APK Unlimited Troops is a fun and addictive game that lets you experience the thrill of world domination. The modded version enhances the gameplay by removing ads and adding unlimited troops and other features. The game is easy to download, install and play, and offers hours of entertainment for strategy lovers. If you are looking for a game that will challenge your strategic skills and satisfy your desire for power, then Dummynation Mod APK Unlimited Troops is the game for you.
-FAQs
-Is Dummynation Mod APK Unlimited Troops safe to use?
-Dummynation Mod APK Unlimited Troops is safe to use as long as you download it from a reliable source online. The modded version does not contain any viruses or malware that can harm your device or data. However, you should always be careful when downloading any app from unknown sources and scan it with an antivirus before installing it.
-What are the benefits of using Dummynation Mod APK Unlimited Troops?
-The benefits of using Dummynation Mod APK Unlimited Troops are that it removes ads and adds unlimited troops and other features that enhance the gameplay. By using this mod, you can enjoy playing Dummynation without any interruptions or limitations. You can also have more fun and freedom in conquering the world with unlimited troops.
-How can I update Dummynation Mod APK Unlimited Troops?
-You can update Dummynation Mod APK Unlimited Troops by downloading the latest version of the mod from a reliable source online. You can then install it over the existing version without losing your progress or data. You should always update the mod whenever there is a new version available to ensure compatibility and performance.
-Can I play Dummynation Mod APK Unlimited Troops offline?
-Yes, you can play Dummynation Mod APK Unlimited Troops offline without any internet connection. The game does not require any internet connection to run or save your progress. However, you may need an internet connection to download and install the mod, or to access some online features, such as leaderboards or achievements.
-Can I share my progress and achievements with other players?
-Yes, you can share your progress and achievements with other players by using the social media buttons on the game. You can also compare your scores and rankings with other players on the leaderboards or achievements. You can also challenge your friends or other players to see who can conquer the world faster or better.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/.md b/spaces/1phancelerku/anime-remove-background/.md
deleted file mode 100644
index 4006a4466de3dcff3afbdcd20584f5220ec62af3..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/.md
+++ /dev/null
@@ -1,44 +0,0 @@
-## Warman Crack With Full Game
-
-
-
-
-
- WORK
-
-
-
-**Click Here ✅ [https://vittuv.com/2tBMxo](https://vittuv.com/2tBMxo)**
-
-
-
-
-
-
-
-
-
-
-
-
-
-Ansys Discovery Student is a cutting-edge product design software for students that leverages our instantaneous simulation technology. It allows you to create and modify geometry models easily with Ansys SpaceClaim technology, which is a direct modeling tool that eliminates the need for complex CAD operations. It also enables you to perform thermal, structural and fluids simulations in real time with completely meshless and interactive solvers. With Ansys Discovery Student, you can explore and understand physics concepts without spending a lot of time learning how to use a complicated simulation tool.
-
-
-
-Ansys Discovery Student is ideal for students who want to learn about product design and engineering in a fun and intuitive way. You can experiment with different design scenarios and see how they affect the performance and behavior of your product. You can also compare different physics phenomena and discover how they interact with each other. For example, you can see how heat transfer affects the stress and deformation of a metal part, or how fluid flow affects the aerodynamics and lift of a wing.
-
-
-
-Ansys Discovery Student is also a great tool for students who want to prepare for their future careers in engineering and design. You can use it to create impressive projects and portfolios that showcase your skills and creativity. You can also use it to collaborate with your classmates and instructors and get feedback on your work. Ansys Discovery Student is compatible with other Ansys products, so you can easily export your models and simulations to other tools for further analysis and optimization.
-
-
-
-Ansys Discovery Student is free to download and use for academic purposes. You can install it on your personal computer or laptop and access it anytime and anywhere. You can also access online tutorials, videos, webinars and community forums to help you get started and learn more about the software. Ansys Discovery Student is the ultimate product design software for students who want to learn by doing and have fun along the way.
-
- 145887f19f
-
-
-
-
-
diff --git a/spaces/1phancelerku/anime-remove-background/Bus Simulator Indonesia Mod APK A Game that Combines Simulation Adventure and Education.md b/spaces/1phancelerku/anime-remove-background/Bus Simulator Indonesia Mod APK A Game that Combines Simulation Adventure and Education.md
deleted file mode 100644
index 9dcc71058a7b7005997d3bb17ae578bb5a22fb63..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Bus Simulator Indonesia Mod APK A Game that Combines Simulation Adventure and Education.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-Game Bus Simulator Indonesia Mod APK: A Fun and Realistic Driving Experience
- Do you love driving games? Do you want to explore the beautiful and diverse cities of Indonesia? Do you want to customize your own bus and drive it on realistic roads? If you answered yes to any of these questions, then you should try Game Bus Simulator Indonesia Mod APK. This is a modified version of the popular game Bus Simulator Indonesia, which lets you enjoy unlimited money, fuel, and other features that make the game more fun and exciting. In this article, we will tell you everything you need to know about Game Bus Simulator Indonesia Mod APK, including its features, how to download and install it, and its pros and cons.
- What is Game Bus Simulator Indonesia Mod APK?
- Game Bus Simulator Indonesia Mod APK is based on driving a bus in various cities of Indonesia to perform various tasks on your android phone. In this game, you need to pick up the passengers from different areas of an Indonesian city and drop them at the destination. You can also drive freely around the city and enjoy the scenery. You can choose from different types of buses, such as mini buses, double-decker buses, or luxury buses. You can also customize your bus with different skins, stickers, horns, lights, and more. You can also experience realistic traffic, weather, day and night cycles, and other aspects of driving in Indonesia.
-game bus simulator indonesia mod apk
-Download ☆☆☆ https://jinyurl.com/2uNJO1
- Features of Game Bus Simulator Indonesia Mod APK
- - Unlimited money and fuel
- One of the best features of Game Bus Simulator Indonesia Mod APK is that it gives you unlimited money and fuel. This means that you can buy any bus you want, upgrade it with any accessories you like, and drive it as long as you want without worrying about running out of gas. You can also use the money to unlock new cities, modes, and missions in the game.
- - Customizable buses and skins
- Another great feature of Game Bus Simulator Indonesia Mod APK is that it allows you to customize your buses with different skins and accessories. You can change the color, design, logo, name, number plate, and more of your bus. You can also add stickers, horns, lights, mirrors, spoilers, exhausts, and more to your bus. You can make your bus look unique and stylish according to your preference.
- - Realistic traffic and weather
- Game Bus Simulator Indonesia Mod APK also offers realistic traffic and weather conditions in the game. You will encounter different types of vehicles on the road, such as cars, trucks, motorcycles, bicycles, rickshaws, etc. You will also have to follow the traffic rules and signals, such as speed limits, stop signs, red lights, etc. You will also experience different weather effects, such as rain, fog, sun, wind, etc. You will have to adjust your driving accordingly to avoid accidents and delays.
- - Various modes and missions
- Game Bus Simulator Indonesia Mod APK also provides various modes and missions for you to enjoy. You can choose from free mode, career mode, or multiplayer mode. In free mode, you can drive anywhere you want without any restrictions or objectives. In career mode, you have to complete different tasks and challenges to earn money and reputation. In multiplayer mode, you can play with other players online and compete with them in races or other events. You can also chat with them using the built-in voice chat feature.
- How to download and install Game Bus Simulator Indonesia Mod APK?
- Requirements for Game Bus Simulator Indonesia Mod APK
-Steps to download and install Game Bus Simulator Indonesia Mod APK
- If you want to download and install Game Bus Simulator Indonesia Mod APK on your android device, you need to follow these simple steps:
-
-- Click on the download link to get the Game Bus Simulator Indonesia Mod APK file.
-- Allow the installation of unknown sources on your device by going to Settings > Security > Unknown Sources.
-- Locate the downloaded file in your file manager and tap on it to start the installation process.
-- Follow the instructions on the screen and wait for the installation to complete.
-- Launch the game and enjoy driving your bus in Indonesia.
-
- Pros and cons of Game Bus Simulator Indonesia Mod APK
- Game Bus Simulator Indonesia Mod APK is a fun and realistic driving game that lets you experience the culture and scenery of Indonesia. However, like any other game, it also has some pros and cons that you should consider before playing it. Here are some of them:
- Pros of Game Bus Simulator Indonesia Mod APK
-
-- It is free to download and play.
-- It has unlimited money and fuel, which makes the game more enjoyable and less stressful.
-- It has customizable buses and skins, which gives you more options and creativity.
-- It has realistic traffic and weather, which adds more challenge and realism to the game.
-- It has various modes and missions, which keeps the game interesting and diverse.
-- It has a multiplayer mode, which allows you to play with other players online and have more fun.
-
- Cons of Game Bus Simulator Indonesia Mod APK
-
-- It may not be compatible with some devices or versions of Android.
-- It may have some bugs or glitches that affect the gameplay or performance.
-- It may require a stable internet connection for some features or modes.
-- It may not be updated regularly or have new content or features.
-
- Conclusion
- Game Bus Simulator Indonesia Mod APK is a great game for anyone who loves driving games and wants to explore the beautiful and diverse cities of Indonesia. It offers unlimited money, fuel, customization, realism, variety, and multiplayer features that make the game more fun and exciting. However, it also has some drawbacks, such as compatibility issues, bugs, internet requirements, and lack of updates. Therefore, you should weigh the pros and cons before downloading and installing it on your device. If you are looking for a fun and realistic driving experience in Indonesia, then you should give Game Bus Simulator Indonesia Mod APK a try.
- FAQs
- Here are some frequently asked questions about Game Bus Simulator Indonesia Mod APK:
-game bus simulator indonesia mod apk unlimited money
-game bus simulator indonesia mod apk download latest version
-game bus simulator indonesia mod apk offline
-game bus simulator indonesia mod apk 2021
-game bus simulator indonesia mod apk free shopping
-game bus simulator indonesia mod apk revdl
-game bus simulator indonesia mod apk terbaru
-game bus simulator indonesia mod apk android 1
-game bus simulator indonesia mod apk unlimited fuel
-game bus simulator indonesia mod apk hack
-game bus simulator indonesia mod apk obb
-game bus simulator indonesia mod apk rexdl
-game bus simulator indonesia mod apk no ads
-game bus simulator indonesia mod apk update
-game bus simulator indonesia mod apk full unlocked
-game bus simulator indonesia mod apk unlimited everything
-game bus simulator indonesia mod apk data
-game bus simulator indonesia mod apk pure
-game bus simulator indonesia mod apk happymod
-game bus simulator indonesia mod apk all buses unlocked
-game bus simulator indonesia mod apk cheat
-game bus simulator indonesia mod apk new version
-game bus simulator indonesia mod apk online
-game bus simulator indonesia mod apk an1
-game bus simulator indonesia mod apk unlimited diamond
-game bus simulator indonesia mod apk latest
-game bus simulator indonesia mod apk original
-game bus simulator indonesia mod apk lenov.ru
-game bus simulator indonesia mod apk old version
-game bus simulator indonesia mod apk unlimited coin
-game bus simulator indonesia mod apk versi lama
-game bus simulator indonesia mod apk mega
-game bus simulator indonesia mod apk pro
-game bus simulator indonesia mod apk premium
-game bus simulator indonesia mod apk vip
-game bus simulator indonesia mod apk plus
-game bus simulator indonesia mod apk 2020
-game bus simulator indonesia mod apk android oyun club
-game bus simulator indonesia mod apk andropalace
-game bus simulator indonesia mod apk apkpure.com
-
-- Is Game Bus Simulator Indonesia Mod APK safe to download and install?
-Yes, Game Bus Simulator Indonesia Mod APK is safe to download and install as long as you get it from a trusted source. However, you should always scan the file for viruses or malware before installing it on your device.
- - What is the difference between Game Bus Simulator Indonesia Mod APK and the original game?
-The main difference between Game Bus Simulator Indonesia Mod APK and the original game is that the modded version gives you unlimited money, fuel, customization, and other features that are not available in the original game. The modded version also bypasses some restrictions or limitations that are imposed by the original game.
- - Can I play Game Bus Simulator Indonesia Mod APK offline?
-You can play Game Bus Simulator Indonesia Mod APK offline in free mode or career mode. However, you will need an internet connection to play multiplayer mode or access some online features or events.
- - How can I update Game Bus Simulator Indonesia Mod APK?
-You can update Game Bus Simulator Indonesia Mod APK by downloading and installing the latest version from the same source. However, you should always back up your data before updating to avoid losing your progress or settings.
- - How can I contact the developers of Game Bus Simulator Indonesia Mod APK?
-You can contact the developers of Game Bus Simulator Indonesia Mod APK by visiting their official website or their social media pages. You can also leave a comment or review on their download page or send them an email at support@maleo.id.
-
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download and Install Instagram 4.0 2 APK - The Best Way to Share Your Photos and Videos.md b/spaces/1phancelerku/anime-remove-background/Download and Install Instagram 4.0 2 APK - The Best Way to Share Your Photos and Videos.md
deleted file mode 100644
index e58f17d6199b48b5cad383c40fe66d11b2974df9..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download and Install Instagram 4.0 2 APK - The Best Way to Share Your Photos and Videos.md
+++ /dev/null
@@ -1,127 +0,0 @@
-
-Download Instagram 4.0 2 APK: How to Get the Latest Version of the Popular Social Media App
- Do you love sharing your photos, videos, stories, reels, and more with your friends and followers on Instagram? Do you want to get the latest features and updates of the app without waiting for the official release on Google Play Store? If yes, then you might be interested in downloading Instagram 4.0 2 APK, which is the latest version of the app as of June 2023. In this article, we will explain what Instagram is, what an APK file is, and how to download and install Instagram 4.0 2 APK on your Android device.
-download instagram 4.0 2 apk
-DOWNLOAD > https://jinyurl.com/2uNPFR
- What is Instagram and why do you need it?
- Instagram is one of the most popular social media apps in the world, with over one billion monthly active users. It allows you to create and share your photos, videos, stories, reels, live broadcasts, IGTV videos, and more with the people you care about. You can also discover new content from other users, celebrities, brands, and influencers that match your interests. You can also chat with your friends, send voice messages, video calls, stickers, GIFs, and more through Instagram Direct. You can also shop for products, watch videos, play games, and access other apps through Instagram.
- Instagram features and benefits
- Some of the features and benefits of using Instagram are:
-
-- You can edit your photos and videos with filters, stickers, text, music, effects, and more.
-- You can create short-form videos with reels, which are fun and creative ways to express yourself.
-- You can share your moments with stories, which disappear after 24 hours.
-- You can go live with your friends or followers and interact with them in real-time.
-- You can upload longer videos with IGTV, which is a platform for vertical videos.
-- You can explore content from different categories with Explore, which shows you personalized recommendations based on your preferences.
-- You can follow hashtags, accounts, topics, and locations that interest you.
-- You can shop for products from your favorite brands and creators with Shopping.
-- You can join or create rooms with up to 50 people with Messenger Rooms.
-- You can access other apps like Facebook Watch, Spotify, TikTok, Netflix, and more with App Clips.
-
- Instagram requirements and compatibility
- To use Instagram on your Android device, you need to have:
-download instagram 4.0 2 apk for android
-download instagram 4.0 2 apk latest version
-download instagram 4.0 2 apk free
-download instagram 4.0 2 apk mod
-download instagram 4.0 2 apk old version
-download instagram 4.0 2 apk file
-download instagram 4.0 2 apk from google play
-download instagram 4.0 2 apk update
-download instagram 4.0 2 apk beta
-download instagram 4.0 2 apk mirror
-download instagram 4.0 2 apk offline
-download instagram 4.0 2 apk cracked
-download instagram 4.0 2 apk hack
-download instagram 4.0 2 apk no ads
-download instagram 4.0 2 apk premium
-download instagram 4.0 2 apk pro
-download instagram 4.0 2 apk full
-download instagram 4.0 2 apk unlocked
-download instagram 4.0 2 apk original
-download instagram 4.0 2 apk safe
-download instagram 4.0 2 apk direct link
-download instagram 4.0 2 apk for pc
-download instagram 4.0 2 apk for ios
-download instagram 4.0 2 apk for windows
-download instagram 4.0 2 apk for mac
-download instagram 4.0 2 apk for tablet
-download instagram 4.0 2 apk for firestick
-download instagram 4.0 2 apk for smart tv
-download instagram 4.0 2 apk for chromebook
-download instagram 4.0 2 apk for huawei
-download instagram 4.0 2 apk for samsung
-download instagram 4.0 2 apk for xiaomi
-download instagram 4.0 2 apk for oppo
-download instagram 4.0 2 apk for vivo
-download instagram 4.0 2 apk for nokia
-download instagram 4.0 2 apk for lg
-download instagram 4.0 2 apk for sony
-download instagram 4.0 2 apk for oneplus
-download instagram
-
-- An Android device running Android 4.1 or higher.
-- A stable internet connection (Wi-Fi or mobile data).
-- An Instagram account (you can sign up with your email address, phone number, or Facebook account).
-- At least 100 MB of free storage space on your device.
-
- What is an APK file and why do you need it?
- An APK file is an Android Package Kit file that contains all the files and code needed to install an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various sources online, such as websites, blogs, forums, or app stores. However, not all APK files are safe or reliable. Some may contain malware or viruses that can harm your device or steal your personal information. Therefore, you need to be careful when downloading APK files from unknown sources.
- APK file definition and advantages
-
-An APK file is an Android Package Kit file that contains all the files and code needed to install an app on an Android device. It is similar to an EXE file on Windows or a DMG file on Mac. You can download APK files from various sources online, such as websites, blogs, forums, or app stores. However, not all APK files are safe or reliable. Some may contain malware or viruses that can harm your device or steal your personal information. Therefore, you need to be careful when downloading APK files from unknown sources.
- Some of the advantages of using APK files are:
-
-- You can get the latest version of an app before it is officially released on Google Play Store.
-- You can access apps that are not available in your region or country.
-- You can install apps that are not compatible with your device or Android version.
-- You can customize or modify apps according to your preferences.
-- You can backup or restore apps and their data easily.
-
- APK file risks and precautions
- Some of the risks and precautions of using APK files are:
-
-- You may download fake or malicious apps that can damage your device or compromise your security.
-- You may violate the terms and conditions of the app developer or Google Play Store.
-- You may lose the warranty or support of your device manufacturer or service provider.
-- You may encounter bugs, errors, crashes, or compatibility issues with the app or your device.
-- You may need to update the app manually whenever a new version is available.
-
- How to download Instagram 4.0 2 APK?
- If you want to download Instagram 4.0 2 APK, you need to follow these steps:
- Step 1: Enable unknown sources on your device
- Before you can install any APK file on your device, you need to enable unknown sources in your settings. This will allow you to install apps from sources other than Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that says installing apps from unknown sources may harm your device. Tap OK to proceed.
- Step 2: Find a reliable source for the APK file
- The next step is to find a reliable source for the Instagram 4.0 2 APK file. You can search online for websites, blogs, forums, or app stores that offer the APK file. However, be careful not to download from shady or untrustworthy sites that may contain malware or viruses. You can also check the reviews, ratings, comments, and feedback from other users who have downloaded the APK file before. You can also scan the APK file with an antivirus app before installing it.
- Step 3: Download and install the APK file
- Once you have found a reliable source for the Instagram 4.0 2 APK file, you can download it to your device. You may need to grant permission for the browser or app to download the file. After the download is complete, you can open the file and tap Install. You may see a message that says installing this app may harm your device. Tap Install Anyway to continue. Wait for the installation process to finish.
- Step 4: Launch and enjoy Instagram 4.0 2
- After the installation is done, you can launch Instagram 4.0 2 from your app drawer or home screen. You can sign in with your existing account or create a new one if you don't have one yet. You can then enjoy all the features and updates of Instagram 4.0 2 on your device.
- Conclusion
- In this article, we have explained what Instagram is, what an APK file is, and how to download and install Instagram 4.0 2 APK on your Android device. We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.
- FAQs
- Here are some frequently asked questions about Instagram 4.0 2 APK:
-
-- Is Instagram 4.0 2 APK safe?
-Instagram 4.0 2 APK is safe as long as you download it from a reliable source and scan it with an antivirus app before installing it. However, there is always a risk of downloading fake or malicious apps from unknown sources, so be careful and use your own discretion.
-- What are the new features of Instagram 4.0 2 APK?
-Instagram 4.0 2 APK has some new features and improvements, such as:
-
-- You can create and join audio rooms with up to 50 people with Live Audio.
-- You can add captions to your stories and reels automatically with Captions Sticker.
-- You can remix your reels with other users' reels with Remix Reels.
-- You can hide or unhide your likes and views on your posts with Hide Like Counts.
-- You can save your drafts of stories and reels with Story Drafts and Reels Drafts.
-
- - How to update Instagram 4.0 2 APK?
-To update Instagram 4.0 2 APK, you need to download the latest version of the APK file from a reliable source and install it on your device. You may need to uninstall the previous version of the app before installing the new one. Alternatively, you can wait for the official update on Google Play Store, which may take some time to be available.
-- How to uninstall Instagram 4.0 2 APK?
-To uninstall Instagram 4.0 2 APK, you need to go to Settings > Apps > Instagram and tap Uninstall. You may also need to delete the APK file from your device storage. If you want to reinstall the app, you can download it from Google Play Store or another source.
-- How to contact Instagram support?
-If you have any issues or problems with Instagram, you can contact Instagram support through the following ways:
-
-- You can report a problem or send feedback through the app by going to Settings > Help > Report a Problem.
-- You can visit the Instagram Help Center website at https://help.instagram.com/ for FAQs, guides, tips, and more.
-- You can follow the Instagram official account on Twitter at https://twitter.com/instagram for updates, announcements, and more.
-
- 401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/2ndelement/voicevox/voicevox_engine/cancellable_engine.py b/spaces/2ndelement/voicevox/voicevox_engine/cancellable_engine.py
deleted file mode 100644
index 1bedb3ff3ebce858d8c585cf8b0d121a4d816210..0000000000000000000000000000000000000000
--- a/spaces/2ndelement/voicevox/voicevox_engine/cancellable_engine.py
+++ /dev/null
@@ -1,220 +0,0 @@
-import argparse
-import asyncio
-import queue
-from multiprocessing import Pipe, Process
-from multiprocessing.connection import Connection
-from tempfile import NamedTemporaryFile
-from typing import List, Optional, Tuple
-
-import soundfile
-
-# FIXME: remove FastAPI dependency
-from fastapi import HTTPException, Request
-
-from .model import AudioQuery
-from .synthesis_engine import make_synthesis_engines
-from .utility import get_latest_core_version
-
-
-class CancellableEngine:
- """
-    Class that handles cancellation of speech synthesis.
-    After initialization, synthesis can be run through the synthesis function
-    (note that it takes more arguments than the original engine's).
-
-    Attributes
-    ----------
-    watch_con_list: List[Tuple[Request, Process]]
-        The Request is used to watch the connection; the Process is killed when the connection drops.
-        A tuple is appended to the list whenever a client connects,
-        and removed when the connection is closed or synthesis finishes.
-    procs_and_cons: queue.Queue[Tuple[Process, Connection]]
-        Queue of processes that are ready to synthesize
-        (processes currently synthesizing are not included).
- """
-
- def __init__(self, args: argparse.Namespace) -> None:
- """
-        Initializes the instance variables and starts args.init_processes
-        worker processes, storing them in procs_and_cons.
- """
- self.args = args
- if not self.args.enable_cancellable_synthesis:
- raise HTTPException(
- status_code=404,
- detail="実験的機能はデフォルトで無効になっています。使用するには引数を指定してください。",
- )
-
- self.watch_con_list: List[Tuple[Request, Process]] = []
- self.procs_and_cons: queue.Queue[Tuple[Process, Connection]] = queue.Queue()
- for _ in range(self.args.init_processes):
- self.procs_and_cons.put(self.start_new_proc())
-
- def start_new_proc(
- self,
- ) -> Tuple[Process, Connection]:
- """
-        Starts a new worker process and returns it.
-
-        Returns
-        -------
-        ret_proc: Process
-            The newly started process
-        sub_proc_con1: Connection
-            Pipe for communicating with ret_proc
- """
- sub_proc_con1, sub_proc_con2 = Pipe(True)
- ret_proc = Process(
- target=start_synthesis_subprocess,
- kwargs={
- "args": self.args,
- "sub_proc_con": sub_proc_con2,
- },
- daemon=True,
- )
- ret_proc.start()
- return ret_proc, sub_proc_con1
-
- def finalize_con(
- self,
- req: Request,
- proc: Process,
- sub_proc_con: Optional[Connection],
- ) -> None:
- """
-        Handles clean-up when a connection is closed.
-        Removes the entry from watch_con_list and finalizes the process:
-        if the process is still alive it is put back into procs_and_cons as-is,
-        otherwise a newly started process is put into procs_and_cons instead.
-
-        Parameters
-        ----------
-        req: fastapi.Request
-            Pass the request received when the connection was established
-            https://fastapi.tiangolo.com/advanced/using-request-directly/
-        proc: Process
-            The process that was performing synthesis
-        sub_proc_con: Connection, optional
-            Pipe to the process that was performing synthesis.
-            If not given, the process is terminated instead of being reused.
- """
- try:
- self.watch_con_list.remove((req, proc))
- except ValueError:
- pass
- try:
- if not proc.is_alive() or sub_proc_con is None:
- proc.close()
- raise ValueError
-            # The process is still alive, so reuse it
- self.procs_and_cons.put((proc, sub_proc_con))
- except ValueError:
-            # The process has died, so start a new one to replace it
- self.procs_and_cons.put(self.start_new_proc())
-
- def _synthesis_impl(
- self,
- query: AudioQuery,
- speaker_id: int,
- request: Request,
- core_version: Optional[str],
- ) -> str:
- """
-        Runs speech synthesis.
-        Compared to the regular engine, an extra request argument is required,
-        and the return value is a file name rather than the waveform itself.
-
- Parameters
- ----------
- query: AudioQuery
- speaker_id: int
- request: fastapi.Request
-            Pass the request received when the connection was established
- https://fastapi.tiangolo.com/advanced/using-request-directly/
- core_version: str
-
- Returns
- -------
- f_name: str
-            Name of the generated audio file
- """
- proc, sub_proc_con1 = self.procs_and_cons.get()
- self.watch_con_list.append((request, proc))
- try:
- sub_proc_con1.send((query, speaker_id, core_version))
- f_name = sub_proc_con1.recv()
- except EOFError:
- raise HTTPException(status_code=422, detail="既にサブプロセスは終了されています")
- except Exception:
- self.finalize_con(request, proc, sub_proc_con1)
- raise
-
- self.finalize_con(request, proc, sub_proc_con1)
- return f_name
-
- async def catch_disconnection(self):
- """
-        Coroutine that monitors client connections.
- """
- while True:
- await asyncio.sleep(1)
- for con in self.watch_con_list:
- req, proc = con
- if await req.is_disconnected():
- try:
- if proc.is_alive():
- proc.terminate()
- proc.join()
- proc.close()
- except ValueError:
- pass
- finally:
- self.finalize_con(req, proc, None)
-
-
-def start_synthesis_subprocess(
- args: argparse.Namespace,
- sub_proc_con: Connection,
-):
- """
-    Function run inside the subprocess that performs speech synthesis.
-    Defined at module level so that it can be pickled.
-
- Parameters
- ----------
- args: argparse.Namespace
-        Pass the namespace created at startup as-is
- sub_proc_con: Connection
-        Pipe for communicating with the main process
- """
-
- synthesis_engines = make_synthesis_engines(
- use_gpu=args.use_gpu,
- voicelib_dirs=args.voicelib_dir,
- voicevox_dir=args.voicevox_dir,
- runtime_dirs=args.runtime_dir,
- cpu_num_threads=args.cpu_num_threads,
- enable_mock=args.enable_mock,
- )
- assert len(synthesis_engines) != 0, "音声合成エンジンがありません。"
- latest_core_version = get_latest_core_version(versions=synthesis_engines.keys())
- while True:
- try:
- query, speaker_id, core_version = sub_proc_con.recv()
- if core_version is None:
- _engine = synthesis_engines[latest_core_version]
- elif core_version in synthesis_engines:
- _engine = synthesis_engines[core_version]
- else:
-                # Error: the requested core version was not found
- sub_proc_con.send("")
- continue
- wave = _engine._synthesis_impl(query, speaker_id)
- with NamedTemporaryFile(delete=False) as f:
- soundfile.write(
- file=f, data=wave, samplerate=query.outputSamplingRate, format="WAV"
- )
- sub_proc_con.send(f.name)
- except Exception:
- sub_proc_con.close()
- raise
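The docstrings above describe a worker-pool design: idle processes wait in `procs_and_cons`, `_synthesis_impl` hands one a request over a pipe, and `catch_disconnection` kills a worker whose client goes away. A rough sketch of how that is typically wired into the FastAPI app follows; the route path, response handling, and `args` plumbing are assumptions, not the repository's actual `run.py`:

```python
# Hedged sketch: how CancellableEngine might be hooked into the API server.
import asyncio

from fastapi import FastAPI, Request
from fastapi.responses import FileResponse

from voicevox_engine.cancellable_engine import CancellableEngine
from voicevox_engine.model import AudioQuery


def setup_app(args) -> FastAPI:
    """Wire the cancellable engine into a FastAPI app; `args` is the parsed CLI namespace."""
    app = FastAPI()
    engine = CancellableEngine(args)

    @app.on_event("startup")
    async def start_watcher():
        # Watches watch_con_list and terminates workers whose clients disconnected.
        asyncio.create_task(engine.catch_disconnection())

    @app.post("/cancellable_synthesis")
    async def cancellable_synthesis(query: AudioQuery, speaker: int, request: Request):
        # _synthesis_impl blocks on procs_and_cons.get() and the pipe, so run it off the event loop.
        loop = asyncio.get_event_loop()
        f_name = await loop.run_in_executor(
            None, engine._synthesis_impl, query, speaker, request, None
        )
        return FileResponse(f_name, media_type="audio/wav")

    return app
```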
diff --git a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/README.md b/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/README.md
deleted file mode 100644
index c6fd17d778a9f9dbe7bf632c92e40e36e67b91d2..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/audio_to_text/captioning/utils/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Utils
-
-Scripts in this directory are used as utility functions.
-
-## BERT Pretrained Embeddings
-
-You can load pretrained word embeddings in Google [BERT](https://github.com/google-research/bert#pre-trained-models) instead of training word embeddings from scratch. The scripts in `utils/bert` need a BERT server in the background. We use BERT server from [bert-as-service](https://github.com/hanxiao/bert-as-service).
-
-To use bert-as-service, you first need to install the repository. It is recommended that you create a new environment with Tensorflow 1.3 to run the BERT server, since bert-as-service is incompatible with Tensorflow 2.x.
-
-After installing [bert-as-service](https://github.com/hanxiao/bert-as-service), download and run the BERT server by executing:
-
-```bash
-bash scripts/prepare_bert_server.sh zh
-```
-
-By default, a server based on the BERT base Chinese model runs in the background. You can switch to other models by changing the corresponding model name and path in `scripts/prepare_bert_server.sh`.
-
-To extract BERT word embeddings, run `utils/bert/create_word_embedding.py`.
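Once the server is up, embeddings are fetched through bert-as-service's client API; a minimal sketch of that call (the sentences are placeholders):

```python
from bert_serving.client import BertClient

# Connects to the BERT server started by scripts/prepare_bert_server.sh (localhost by default).
bc = BertClient()

# Each sentence becomes a fixed-size vector (768-dim for a BERT base model).
vectors = bc.encode(["a dog is barking", "rain falls on the window"])
print(vectors.shape)  # (2, 768)
```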
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py b/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py
deleted file mode 100644
index 9911b6e135e51970177fcac067c12192b0b57c1c..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/openai.py
+++ /dev/null
@@ -1,129 +0,0 @@
-""" OpenAI pretrained model functions
-
-Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
-"""
-
-import os
-import warnings
-from typing import Union, List
-
-import torch
-
-from .model import build_model_from_openai_state_dict
-from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained
-
-__all__ = ["list_openai_models", "load_openai_model"]
-
-
-def list_openai_models() -> List[str]:
- """Returns the names of available CLIP models"""
- return list_pretrained_tag_models('openai')
-
-
-def load_openai_model(
- name: str,
- model_cfg,
- device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
- jit=True,
- cache_dir=os.path.expanduser("~/.cache/clip"),
- enable_fusion: bool = False,
- fusion_type: str = 'None'
-):
- """Load a CLIP model, preserve its text pretrained part, and set in the CLAP model
-
- Parameters
- ----------
- name : str
- A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
- device : Union[str, torch.device]
- The device to put the loaded model
- jit : bool
- Whether to load the optimized JIT model (default) or more hackable non-JIT model.
-
- Returns
- -------
- model : torch.nn.Module
- The CLAP model
- (unlike the original CLIP loader, only the model is returned;
- no image preprocess transform is provided)
- """
- if get_pretrained_url(name, 'openai'):
- model_path = download_pretrained(get_pretrained_url(name, 'openai'), root=cache_dir)
- elif os.path.isfile(name):
- model_path = name
- else:
- raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
-
- try:
- # loading JIT archive
- model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
- state_dict = None
- except RuntimeError:
- # loading saved state dict
- if jit:
- warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
- jit = False
- state_dict = torch.load(model_path, map_location="cpu")
-
- if not jit:
- try:
- model = build_model_from_openai_state_dict(state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type).to(device)
- except KeyError:
- sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
- model = build_model_from_openai_state_dict(sd, model_cfg, enable_fusion, fusion_type).to(device)
-
- if str(device) == "cpu":
- model.float()
- return model
-
- # patch the device names
- device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
- device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
-
- def patch_device(module):
- try:
- graphs = [module.graph] if hasattr(module, "graph") else []
- except RuntimeError:
- graphs = []
-
- if hasattr(module, "forward1"):
- graphs.append(module.forward1.graph)
-
- for graph in graphs:
- for node in graph.findAllNodes("prim::Constant"):
- if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
- node.copyAttributes(device_node)
-
- model.apply(patch_device)
- patch_device(model.encode_audio)
- patch_device(model.encode_text)
-
- # patch dtype to float32 on CPU
- if str(device) == "cpu":
- float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
- float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
- float_node = float_input.node()
-
- def patch_float(module):
- try:
- graphs = [module.graph] if hasattr(module, "graph") else []
- except RuntimeError:
- graphs = []
-
- if hasattr(module, "forward1"):
- graphs.append(module.forward1.graph)
-
- for graph in graphs:
- for node in graph.findAllNodes("aten::to"):
- inputs = list(node.inputs())
- for i in [1, 2]: # dtype can be the second or third argument to aten::to()
- if inputs[i].node()["value"] == 5:
- inputs[i].node().copyAttributes(float_node)
-
- model.apply(patch_float)
- patch_float(model.encode_audio)
- patch_float(model.encode_text)
- model.float()
-
- model.audio_branch.audio_length = model.audio_cfg.audio_length
- return model
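The central trick in `load_openai_model` is to try the checkpoint as a TorchScript archive first and fall back to an eager state dict. Stripped of the CLAP specifics, the pattern looks roughly like this (path handling and return convention are illustrative):

```python
import warnings

import torch


def load_checkpoint(path: str, device: str = "cpu"):
    """Try a TorchScript archive first, then fall back to an eager state dict."""
    try:
        # A JIT archive can be executed directly, without the original class definition.
        model = torch.jit.load(path, map_location=device).eval()
        return model, None
    except RuntimeError:
        warnings.warn(f"{path} is not a JIT archive; loading as a state dict instead")
        state_dict = torch.load(path, map_location="cpu")
        return None, state_dict
```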
diff --git a/spaces/Abdllh/poetry2023/README.md b/spaces/Abdllh/poetry2023/README.md
deleted file mode 100644
index fa5c6ad64f181ff6051745354b4af489527806f1..0000000000000000000000000000000000000000
--- a/spaces/Abdllh/poetry2023/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Poetry2023
-emoji: 👁
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.16.0
-app_file: app.py
-pinned: false
-duplicated_from: aaaaaabbbbbbbdddddddduuuuulllll/poetry2023
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring-plugin.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring-plugin.d.ts
deleted file mode 100644
index b5320db02fb83b864997d0a125a06e76d586a604..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/lzstring-plugin.d.ts
+++ /dev/null
@@ -1,8 +0,0 @@
-import LZString from './lzstring';
-
-export default class LZStringPlugin extends Phaser.Plugins.BasePlugin {
- add(
- config?: LZString.IConfig
- ): LZString;
-
-}
\ No newline at end of file
diff --git a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/README.md b/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/README.md
deleted file mode 100644
index 76c690992c0a4ee15b2247436a306375a62c61d3..0000000000000000000000000000000000000000
--- a/spaces/AlekseyKorshuk/thin-plate-spline-motion-model/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Thin Plate Spline Motion Model
-emoji: 💩
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
diff --git a/spaces/AlexWang/lama/app.py b/spaces/AlexWang/lama/app.py
deleted file mode 100644
index cd0e6aa3eaecdf05c2304ed9aaab3fc068fa2d23..0000000000000000000000000000000000000000
--- a/spaces/AlexWang/lama/app.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import os
-os.system("wget https://huggingface.co/akhaliq/lama/resolve/main/best.ckpt")
-os.system("pip install imageio")
-os.system("pip install albumentations==0.5.2")
-import cv2
-import paddlehub as hub
-import gradio as gr
-import torch
-from PIL import Image, ImageOps
-import numpy as np
-import imageio
-os.mkdir("data")
-os.rename("best.ckpt", "models/best.ckpt")
-os.mkdir("dataout")
-model = hub.Module(name='U2Net')
-
-
-def infer(img, mask, option):
- print(type(img["image"]), img["image"].shape)
- imageio.imwrite("./data/data.png", img["image"])
- if option == "Upload":
- imageio.imwrite("./data/data_mask.png", mask)
- elif option == "Automatic (U2net)":
- result = model.Segmentation(
- images=[cv2.cvtColor(img["image"], cv2.COLOR_RGB2BGR)],
- paths=None,
- batch_size=1,
- input_size=320,
- output_dir='output',
- visualization=True)
- im = Image.fromarray(result[0]['mask'])
- im.save("./data/data_mask.png")
- else:
- imageio.imwrite("./data/data_mask.png", img["mask"])
- os.system('python predict.py model.path=/home/user/app/ indir=/home/user/app/data/ outdir=/home/user/app/dataout/ device=cpu')
- return "./dataout/data_mask.png", "./data/data_mask.png"
-
-
-inputs = [gr.Image(tool="sketch", label="Input", type="numpy"),
- gr.Image(label="Mask", type="numpy"),
- gr.inputs.Radio(choices=["Upload", "Manual", "Automatic (U2net)"],
- type="value", default="Upload", label="Masking option")]
-outputs = [gr.outputs.Image(type="file", label="output"),
- gr.outputs.Image(type="file", label="Mask")]
-title = "LaMa Image Inpainting"
-description = "Gradio demo for LaMa: Resolution-robust Large Mask Inpainting with Fourier Convolutions. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Masks are generated by U^2net"
-article = "Resolution-robust Large Mask Inpainting with Fourier Convolutions | Github Repo
"
-gr.Interface(infer, inputs, outputs, title=title,
- description=description, article=article).launch()
diff --git a/spaces/Amrrs/DragGan-Inversion/gen_images.py b/spaces/Amrrs/DragGan-Inversion/gen_images.py
deleted file mode 100644
index 996bc12f4cde6ee9d0076446250ed076a04b2641..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/gen_images.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Generate images using pretrained network pickle."""
-
-import os
-import re
-from typing import List, Optional, Tuple, Union
-
-import click
-import dnnlib
-import numpy as np
-import PIL.Image
-import torch
-
-import legacy
-
-# ----------------------------------------------------------------------------
-
-
-def parse_range(s: Union[str, List]) -> List[int]:
- '''Parse a comma separated list of numbers or ranges and return a list of ints.
-
- Example: '1,2,5-10' returns [1, 2, 5, 6, 7, 8, 9, 10]
- '''
- if isinstance(s, list):
- return s
- ranges = []
- range_re = re.compile(r'^(\d+)-(\d+)$')
- for p in s.split(','):
- m = range_re.match(p)
- if m:
- ranges.extend(range(int(m.group(1)), int(m.group(2))+1))
- else:
- ranges.append(int(p))
- return ranges
-
-# ----------------------------------------------------------------------------
-
-
-def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
- '''Parse a floating point 2-vector of syntax 'a,b'.
-
- Example:
- '0,1' returns (0,1)
- '''
- if isinstance(s, tuple):
- return s
- parts = s.split(',')
- if len(parts) == 2:
- return (float(parts[0]), float(parts[1]))
- raise ValueError(f'cannot parse 2-vector {s}')
-
-# ----------------------------------------------------------------------------
-
-
-def make_transform(translate: Tuple[float, float], angle: float):
- m = np.eye(3)
- s = np.sin(angle/360.0*np.pi*2)
- c = np.cos(angle/360.0*np.pi*2)
- m[0][0] = c
- m[0][1] = s
- m[0][2] = translate[0]
- m[1][0] = -s
- m[1][1] = c
- m[1][2] = translate[1]
- return m
-
-# ----------------------------------------------------------------------------
-
-
-@click.command()
-@click.option('--network', 'network_pkl', help='Network pickle filename', required=True)
-@click.option('--seeds', type=parse_range, help='List of random seeds (e.g., \'0,1,4-6\')', required=True)
-@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
-@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
-@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
-@click.option('--translate', help='Translate XY-coordinate (e.g. \'0.3,1\')', type=parse_vec2, default='0,0', show_default=True, metavar='VEC2')
-@click.option('--rotate', help='Rotation angle in degrees', type=float, default=0, show_default=True, metavar='ANGLE')
-@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
-def generate_images(
- network_pkl: str,
- seeds: List[int],
- truncation_psi: float,
- noise_mode: str,
- outdir: str,
- translate: Tuple[float, float],
- rotate: float,
- class_idx: Optional[int]
-):
- """Generate images using pretrained network pickle.
-
- Examples:
-
- \b
- # Generate an image using pre-trained AFHQv2 model ("Ours" in Figure 1, left).
- python gen_images.py --outdir=out --trunc=1 --seeds=2 \\
- --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-afhqv2-512x512.pkl
-
- \b
- # Generate uncurated images with truncation using the MetFaces-U dataset
- python gen_images.py --outdir=out --trunc=0.7 --seeds=600-605 \\
- --network=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-t-metfacesu-1024x1024.pkl
- """
-
- print('Loading networks from "%s"...' % network_pkl)
- device = torch.device('cuda')
- with dnnlib.util.open_url(network_pkl) as f:
- G = legacy.load_network_pkl(f)['G_ema'].to(device) # type: ignore
- # import pickle
- # G = legacy.load_network_pkl(f)
- # output = open('checkpoints/stylegan2-car-config-f-pt.pkl', 'wb')
- # pickle.dump(G, output)
-
- os.makedirs(outdir, exist_ok=True)
-
- # Labels.
- label = torch.zeros([1, G.c_dim], device=device)
- if G.c_dim != 0:
- if class_idx is None:
- raise click.ClickException(
- 'Must specify class label with --class when using a conditional network')
- label[:, class_idx] = 1
- else:
- if class_idx is not None:
- print('warn: --class=lbl ignored when running on an unconditional network')
-
- # Generate images.
- for seed_idx, seed in enumerate(seeds):
- print('Generating image for seed %d (%d/%d) ...' %
- (seed, seed_idx, len(seeds)))
- z = torch.from_numpy(np.random.RandomState(
- seed).randn(1, G.z_dim)).to(device)
-
- # Construct an inverse rotation/translation matrix and pass to the generator. The
- # generator expects this matrix as an inverse to avoid potentially failing numerical
- # operations in the network.
- if hasattr(G.synthesis, 'input'):
- m = make_transform(translate, rotate)
- m = np.linalg.inv(m)
- G.synthesis.input.transform.copy_(torch.from_numpy(m))
-
- img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
- img = (img.permute(0, 2, 3, 1) * 127.5 +
- 128).clamp(0, 255).to(torch.uint8)
- PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(
- f'{outdir}/seed{seed:04d}.png')
-
-
-# ----------------------------------------------------------------------------
-
-if __name__ == "__main__":
- generate_images() # pylint: disable=no-value-for-parameter
-
-# ----------------------------------------------------------------------------
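The `--rotate`/`--translate` options above are turned into a 3×3 homogeneous transform whose inverse is written into `G.synthesis.input.transform`. A small self-contained check of that construction (mirroring `make_transform`):

```python
import numpy as np


def make_transform(translate, angle):
    # Same construction as above: rotate by `angle` degrees, then translate.
    m = np.eye(3)
    s, c = np.sin(np.deg2rad(angle)), np.cos(np.deg2rad(angle))
    m[0, 0], m[0, 1], m[0, 2] = c, s, translate[0]
    m[1, 0], m[1, 1], m[1, 2] = -s, c, translate[1]
    return m


m = make_transform((0.3, 1.0), 90.0)
m_inv = np.linalg.inv(m)  # the script passes this inverse to G.synthesis.input.transform
print(np.allclose(m @ m_inv, np.eye(3)))  # True
```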
diff --git a/spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/util.py b/spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/util.py
deleted file mode 100644
index 544c94895dfc0bfcd1285fde7cd2c102b71113ed..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/stylegan_human/utils/util.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-import torch
-import cv2
-from torchvision import transforms
-import numpy as np
-import math
-
-
-def visual(output, out_path):
- output = (output + 1)/2
- output = torch.clamp(output, 0, 1)
- if output.shape[1] == 1:
- output = torch.cat([output, output, output], 1)
- output = output[0].detach().cpu().permute(1, 2, 0).numpy()
- output = (output*255).astype(np.uint8)
- output = output[:, :, ::-1]
- cv2.imwrite(out_path, output)
-
-
-def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
-
- lr_ramp = min(1, (1 - t) / rampdown)
- lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
- lr_ramp = lr_ramp * min(1, t / rampup)
- return initial_lr * lr_ramp
-
-
-def latent_noise(latent, strength):
- noise = torch.randn_like(latent) * strength
-
- return latent + noise
-
-
-def noise_regularize_(noises):
- loss = 0
-
- for noise in noises:
- size = noise.shape[2]
-
- while True:
- loss = (
- loss
- + (noise * torch.roll(noise, shifts=1, dims=3)).mean().pow(2)
- + (noise * torch.roll(noise, shifts=1, dims=2)).mean().pow(2)
- )
-
- if size <= 8:
- break
-
- noise = noise.reshape([-1, 1, size // 2, 2, size // 2, 2])
- noise = noise.mean([3, 5])
- size //= 2
-
- return loss
-
-
-def noise_normalize_(noises):
- for noise in noises:
- mean = noise.mean()
- std = noise.std()
-
- noise.data.add_(-mean).div_(std)
-
-
-def tensor_to_numpy(x):
- x = x[0].permute(1, 2, 0)
- x = torch.clamp(x, -1, 1)
- x = (x+1) * 127.5
- x = x.cpu().detach().numpy().astype(np.uint8)
- return x
-
-
-def numpy_to_tensor(x):
- x = (x / 255 - 0.5) * 2
- x = torch.from_numpy(x).unsqueeze(0).permute(0, 3, 1, 2)
- x = x.cuda().float()
- return x
-
-
-def tensor_to_pil(x):
- x = torch.clamp(x, -1, 1)
- x = (x+1) * 127.5
- return transforms.ToPILImage()(x.squeeze_(0))
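`get_lr` above combines a linear warm-up (over the first `rampup` fraction of training) with a cosine-shaped ramp-down (over the last `rampdown` fraction). A quick look at the schedule it produces; the helper is restated so the snippet runs on its own:

```python
import math


def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
    # Restated from above: linear warm-up over `rampup`, cosine ramp-down over `rampdown`.
    lr_ramp = min(1, (1 - t) / rampdown)
    lr_ramp = 0.5 - 0.5 * math.cos(lr_ramp * math.pi)
    lr_ramp = lr_ramp * min(1, t / rampup)
    return initial_lr * lr_ramp


for t in (0.0, 0.05, 0.25, 0.5, 0.75, 1.0):
    print(f"t={t:.2f}  lr={get_lr(t, initial_lr=0.1):.4f}")
```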
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/custom_diffusion/README.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/custom_diffusion/README.md
deleted file mode 100644
index 9e3c387e3d342c270fa72b22643ba7bd7548095e..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/custom_diffusion/README.md
+++ /dev/null
@@ -1,280 +0,0 @@
-# Custom Diffusion training example
-
-[Custom Diffusion](https://arxiv.org/abs/2212.04488) is a method to customize text-to-image models like Stable Diffusion given just a few (4~5) images of a subject.
-The `train_custom_diffusion.py` script shows how to implement the training procedure and adapt it for stable diffusion.
-
-## Running locally with PyTorch
-
-### Installing the dependencies
-
-Before running the scripts, make sure to install the library's training dependencies:
-
-**Important**
-
-To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
-
-```bash
-git clone https://github.com/huggingface/diffusers
-cd diffusers
-pip install -e .
-```
-
-Then cd into the example folder and run
-
-```bash
-pip install -r requirements.txt
-pip install clip-retrieval
-```
-
-And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
-
-```bash
-accelerate config
-```
-
-Or for a default accelerate configuration without answering questions about your environment
-
-```bash
-accelerate config default
-```
-
-Or if your environment doesn't support an interactive shell e.g. a notebook
-
-```python
-from accelerate.utils import write_basic_config
-write_basic_config()
-```
-### Cat example 😺
-
-Now let's get our dataset. Download dataset from [here](https://www.cs.cmu.edu/~custom-diffusion/assets/data.zip) and unzip it.
-
-We also collect 200 real images using `clip-retrieval`; these are combined with the target images in the training dataset as a form of regularization, which prevents overfitting to the given target image. The regularization is enabled by the flags `with_prior_preservation` and `real_prior` together with `prior_loss_weight=1.`.
-The `class_prompt` should be the same category name as the target image. The collected real images have text captions similar to the `class_prompt`. The retrieved images are saved in `class_data_dir`. You can disable `real_prior` to use generated images as regularization instead. To collect the real images, run this command first, before training.
-
-```bash
-pip install clip-retrieval
-python retrieve.py --class_prompt cat --class_data_dir real_reg/samples_cat --num_class_images 200
-```
-
-**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
-
-```bash
-export MODEL_NAME="CompVis/stable-diffusion-v1-4"
-export OUTPUT_DIR="path-to-save-model"
-export INSTANCE_DIR="./data/cat"
-
-accelerate launch train_custom_diffusion.py \
- --pretrained_model_name_or_path=$MODEL_NAME \
- --instance_data_dir=$INSTANCE_DIR \
- --output_dir=$OUTPUT_DIR \
- --class_data_dir=./real_reg/samples_cat/ \
- --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
- --class_prompt="cat" --num_class_images=200 \
- --instance_prompt="photo of a cat" \
- --resolution=512 \
- --train_batch_size=2 \
- --learning_rate=1e-5 \
- --lr_warmup_steps=0 \
- --max_train_steps=250 \
- --scale_lr --hflip \
- --modifier_token ""
-```
-
-**Use `--enable_xformers_memory_efficient_attention` for faster training with lower VRAM requirement (16GB per GPU). Follow [this guide](https://github.com/facebookresearch/xformers) for installation instructions.**
-
-To track your experiments using Weights and Biases (`wandb`) and to save intermediate results (which we HIGHLY recommend), follow these steps:
-
-* Install `wandb`: `pip install wandb`.
-* Authorize: `wandb login`.
-* Then specify a `validation_prompt` and set `report_to` to `wandb` while launching training. You can also configure the following related arguments:
- * `num_validation_images`
- * `validation_steps`
-
-Here is an example command:
-
-```bash
-accelerate launch train_custom_diffusion.py \
- --pretrained_model_name_or_path=$MODEL_NAME \
- --instance_data_dir=$INSTANCE_DIR \
- --output_dir=$OUTPUT_DIR \
- --class_data_dir=./real_reg/samples_cat/ \
- --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
- --class_prompt="cat" --num_class_images=200 \
- --instance_prompt="photo of a cat" \
- --resolution=512 \
- --train_batch_size=2 \
- --learning_rate=1e-5 \
- --lr_warmup_steps=0 \
- --max_train_steps=250 \
- --scale_lr --hflip \
- --modifier_token "" \
- --validation_prompt="<new1> cat sitting in a bucket" \
- --report_to="wandb"
-```
-
-Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/26ghrcau) where you can check out the intermediate results along with other training details.
-
-If you specify `--push_to_hub`, the learned parameters will be pushed to a repository on the Hugging Face Hub. Here is an [example repository](https://huggingface.co/sayakpaul/custom-diffusion-cat).
-
-### Training on multiple concepts 🐱🪵
-
-Provide a [json](https://github.com/adobe-research/custom-diffusion/blob/main/assets/concept_list.json) file with the info about each concept, similar to [this](https://github.com/ShivamShrirao/diffusers/blob/main/examples/dreambooth/train_dreambooth.py).
-
-To collect the real images run this command for each concept in the json file.
-
-```bash
-pip install clip-retrieval
-python retrieve.py --class_prompt {} --class_data_dir {} --num_class_images 200
-```
-
-And then we're ready to start training!
-
-```bash
-export MODEL_NAME="CompVis/stable-diffusion-v1-4"
-export OUTPUT_DIR="path-to-save-model"
-
-accelerate launch train_custom_diffusion.py \
- --pretrained_model_name_or_path=$MODEL_NAME \
- --output_dir=$OUTPUT_DIR \
- --concepts_list=./concept_list.json \
- --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
- --resolution=512 \
- --train_batch_size=2 \
- --learning_rate=1e-5 \
- --lr_warmup_steps=0 \
- --max_train_steps=500 \
- --num_class_images=200 \
- --scale_lr --hflip \
- --modifier_token "+"
-```
-
-Here is an example [Weights and Biases page](https://wandb.ai/sayakpaul/custom-diffusion/runs/3990tzkg) where you can check out the intermediate results along with other training details.
-
-### Training on human faces
-
-For fine-tuning on human faces we found the following configuration to work better: `learning_rate=5e-6`, `max_train_steps=1000 to 2000`, and `freeze_model=crossattn` with at least 15-20 images.
-
-To collect the real images use this command first before training.
-
-```bash
-pip install clip-retrieval
-python retrieve.py --class_prompt person --class_data_dir real_reg/samples_person --num_class_images 200
-```
-
-Then start training!
-
-```bash
-export MODEL_NAME="CompVis/stable-diffusion-v1-4"
-export OUTPUT_DIR="path-to-save-model"
-export INSTANCE_DIR="path-to-images"
-
-accelerate launch train_custom_diffusion.py \
- --pretrained_model_name_or_path=$MODEL_NAME \
- --instance_data_dir=$INSTANCE_DIR \
- --output_dir=$OUTPUT_DIR \
- --class_data_dir=./real_reg/samples_person/ \
- --with_prior_preservation --real_prior --prior_loss_weight=1.0 \
- --class_prompt="person" --num_class_images=200 \
- --instance_prompt="photo of a person" \
- --resolution=512 \
- --train_batch_size=2 \
- --learning_rate=5e-6 \
- --lr_warmup_steps=0 \
- --max_train_steps=1000 \
- --scale_lr --hflip --noaug \
- --freeze_model crossattn \
- --modifier_token "" \
- --enable_xformers_memory_efficient_attention
-```
-
-## Inference
-
-Once you have trained a model using the above command, you can run inference with the command below. Make sure to include the `modifier token` (e.g. `<new1>` in the example above) in your prompt.
-
-```python
-import torch
-from diffusers import DiffusionPipeline
-
-pipe = DiffusionPipeline.from_pretrained(
- "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
-).to("cuda")
-pipe.unet.load_attn_procs(
- "path-to-save-model", weight_name="pytorch_custom_diffusion_weights.bin"
-)
-pipe.load_textual_inversion("path-to-save-model", weight_name=".bin")
-
-image = pipe(
- " cat sitting in a bucket",
- num_inference_steps=100,
- guidance_scale=6.0,
- eta=1.0,
-).images[0]
-image.save("cat.png")
-```
-
-It's possible to directly load these parameters from a Hub repository:
-
-```python
-import torch
-from huggingface_hub.repocard import RepoCard
-from diffusers import DiffusionPipeline
-
-model_id = "sayakpaul/custom-diffusion-cat"
-card = RepoCard.load(model_id)
-base_model_id = card.data.to_dict()["base_model"]
-
-pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to(
-"cuda")
-pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
-pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
-
-image = pipe(
- " cat sitting in a bucket",
- num_inference_steps=100,
- guidance_scale=6.0,
- eta=1.0,
-).images[0]
-image.save("cat.png")
-```
-
-Here is an example of performing inference with multiple concepts:
-
-```python
-import torch
-from huggingface_hub.repocard import RepoCard
-from diffusers import DiffusionPipeline
-
-model_id = "sayakpaul/custom-diffusion-cat-wooden-pot"
-card = RepoCard.load(model_id)
-base_model_id = card.data.to_dict()["base_model"]
-
-pipe = DiffusionPipeline.from_pretrained(base_model_id, torch_dtype=torch.float16).to(
-"cuda")
-pipe.unet.load_attn_procs(model_id, weight_name="pytorch_custom_diffusion_weights.bin")
-pipe.load_textual_inversion(model_id, weight_name="<new1>.bin")
-pipe.load_textual_inversion(model_id, weight_name="<new2>.bin")
-
-image = pipe(
- "the cat sculpture in the style of a wooden pot",
- num_inference_steps=100,
- guidance_scale=6.0,
- eta=1.0,
-).images[0]
-image.save("multi-subject.png")
-```
-
-Here, `cat` and `wooden pot` refer to the multiple concepts.
-
-### Inference from a training checkpoint
-
-You can also perform inference from one of the complete checkpoints saved during the training process, if you used the `--checkpointing_steps` argument.
-
-TODO.
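In the meantime, a rough sketch of what this could look like, assuming a checkpoint directory such as `path-to-save-model/checkpoint-250/` contains the same `pytorch_custom_diffusion_weights.bin` and `<new1>.bin` files as the final output directory (the directory layout and file names here are assumptions, not guaranteed by the script):

```python
import torch
from diffusers import DiffusionPipeline

# Hypothetical checkpoint directory produced with --checkpointing_steps; adjust to your run.
ckpt_dir = "path-to-save-model/checkpoint-250"

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
pipe.unet.load_attn_procs(ckpt_dir, weight_name="pytorch_custom_diffusion_weights.bin")
pipe.load_textual_inversion(ckpt_dir, weight_name="<new1>.bin")

image = pipe(
    "<new1> cat sitting in a bucket", num_inference_steps=100, guidance_scale=6.0, eta=1.0
).images[0]
image.save("cat-from-checkpoint.png")
```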
-
-## Set grads to none
-To save even more memory, pass the `--set_grads_to_none` argument to the script. This will set grads to None instead of zero. However, be aware that it changes certain behaviors, so if you start experiencing any problems, remove this argument.
-
-More info: https://pytorch.org/docs/stable/generated/torch.optim.Optimizer.zero_grad.html
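The flag corresponds to PyTorch's own `set_to_none` option for `Optimizer.zero_grad`; in plain PyTorch the equivalent call looks like this:

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)

loss = model(torch.randn(8, 4)).sum()
loss.backward()
optimizer.step()

# Equivalent of passing --set_grads_to_none: drop the gradient tensors entirely
# instead of overwriting them with zeros, which saves memory between steps.
optimizer.zero_grad(set_to_none=True)
```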
-
-## Experimental results
-You can refer to [our webpage](https://www.cs.cmu.edu/~custom-diffusion/) that discusses our experiments in detail. We also released a more extensive dataset of 101 concepts for evaluating model customization methods. For more details please refer to our [dataset webpage](https://www.cs.cmu.edu/~custom-diffusion/dataset.html).
\ No newline at end of file
diff --git a/spaces/Andy1621/IAT_enhancement/model/blocks.py b/spaces/Andy1621/IAT_enhancement/model/blocks.py
deleted file mode 100644
index 38d2f2160959c0441ff324f220d588fde9033a1b..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/IAT_enhancement/model/blocks.py
+++ /dev/null
@@ -1,281 +0,0 @@
-"""
-Code copied from the UniFormer source code:
-https://github.com/Sense-X/UniFormer
-"""
-import os
-import torch
-import torch.nn as nn
-from functools import partial
-import math
-from timm.models.vision_transformer import VisionTransformer, _cfg
-from timm.models.registry import register_model
-from timm.models.layers import trunc_normal_, DropPath, to_2tuple
-
-# ResMLP's normalization
-class Aff(nn.Module):
- def __init__(self, dim):
- super().__init__()
- # learnable
- self.alpha = nn.Parameter(torch.ones([1, 1, dim]))
- self.beta = nn.Parameter(torch.zeros([1, 1, dim]))
-
- def forward(self, x):
- x = x * self.alpha + self.beta
- return x
-
-# Color Normalization
-class Aff_channel(nn.Module):
- def __init__(self, dim, channel_first = True):
- super().__init__()
- # learnable
- self.alpha = nn.Parameter(torch.ones([1, 1, dim]))
- self.beta = nn.Parameter(torch.zeros([1, 1, dim]))
- self.color = nn.Parameter(torch.eye(dim))
- self.channel_first = channel_first
-
- def forward(self, x):
- if self.channel_first:
- x1 = torch.tensordot(x, self.color, dims=[[-1], [-1]])
- x2 = x1 * self.alpha + self.beta
- else:
- x1 = x * self.alpha + self.beta
- x2 = torch.tensordot(x1, self.color, dims=[[-1], [-1]])
- return x2
-
-class Mlp(nn.Module):
- # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-class CMlp(nn.Module):
- # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
- self.act = act_layer()
- self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-class CBlock_ln(nn.Module):
- def __init__(self, dim, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., act_layer=nn.GELU, norm_layer=Aff_channel, init_values=1e-4):
- super().__init__()
- self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
- #self.norm1 = Aff_channel(dim)
- self.norm1 = norm_layer(dim)
- self.conv1 = nn.Conv2d(dim, dim, 1)
- self.conv2 = nn.Conv2d(dim, dim, 1)
- self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- #self.norm2 = Aff_channel(dim)
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.gamma_1 = nn.Parameter(init_values * torch.ones((1, dim, 1, 1)), requires_grad=True)
- self.gamma_2 = nn.Parameter(init_values * torch.ones((1, dim, 1, 1)), requires_grad=True)
- self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x):
- x = x + self.pos_embed(x)
- B, C, H, W = x.shape
- #print(x.shape)
- norm_x = x.flatten(2).transpose(1, 2)
- #print(norm_x.shape)
- norm_x = self.norm1(norm_x)
- norm_x = norm_x.view(B, H, W, C).permute(0, 3, 1, 2)
-
-
- x = x + self.drop_path(self.gamma_1*self.conv2(self.attn(self.conv1(norm_x))))
- norm_x = x.flatten(2).transpose(1, 2)
- norm_x = self.norm2(norm_x)
- norm_x = norm_x.view(B, H, W, C).permute(0, 3, 1, 2)
- x = x + self.drop_path(self.gamma_2*self.mlp(norm_x))
- return x
-
-
-def window_partition(x, window_size):
- """
- Args:
- x: (B, H, W, C)
- window_size (int): window size
- Returns:
- windows: (num_windows*B, window_size, window_size, C)
- """
- B, H, W, C = x.shape
- #print(x.shape)
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- return windows
-
-
-def window_reverse(windows, window_size, H, W):
- """
- Args:
- windows: (num_windows*B, window_size, window_size, C)
- window_size (int): Window size
- H (int): Height of image
- W (int): Width of image
- Returns:
- x: (B, H, W, C)
- """
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
-
-
-class WindowAttention(nn.Module):
- r""" Window based multi-head self attention (W-MSA) module with relative position bias.
- It supports both shifted and non-shifted windows.
- Args:
- dim (int): Number of input channels.
- window_size (tuple[int]): The height and width of the window.
- num_heads (int): Number of attention heads.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
- """
-
- def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
- super().__init__()
- self.dim = dim
- self.window_size = window_size # Wh, Ww
- self.num_heads = num_heads
- head_dim = dim // num_heads
- self.scale = qk_scale or head_dim ** -0.5
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- self.softmax = nn.Softmax(dim=-1)
-
- def forward(self, x):
- B_, N, C = x.shape
- qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- q = q * self.scale
- attn = (q @ k.transpose(-2, -1))
-
- attn = self.softmax(attn)
-
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
-## Layer_norm, Aff_norm, Aff_channel_norm
-class SwinTransformerBlock(nn.Module):
- r""" Swin Transformer Block.
- Args:
- dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resolution.
- num_heads (int): Number of attention heads.
- window_size (int): Window size.
- shift_size (int): Shift size for SW-MSA.
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
- drop (float, optional): Dropout rate. Default: 0.0
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
- """
-
- def __init__(self, dim, num_heads=2, window_size=8, shift_size=0,
- mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
- act_layer=nn.GELU, norm_layer=Aff_channel):
- super().__init__()
- self.dim = dim
- self.num_heads = num_heads
- self.window_size = window_size
- self.shift_size = shift_size
- self.mlp_ratio = mlp_ratio
-
- self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
- #self.norm1 = norm_layer(dim)
- self.norm1 = norm_layer(dim)
- self.attn = WindowAttention(
- dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
- qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
-
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- #self.norm2 = norm_layer(dim)
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x):
- x = x + self.pos_embed(x)
- B, C, H, W = x.shape
- x = x.flatten(2).transpose(1, 2)
-
- shortcut = x
- x = self.norm1(x)
- x = x.view(B, H, W, C)
-
- # cyclic shift
- if self.shift_size > 0:
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
- else:
- shifted_x = x
-
- # partition windows
- x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
-
- # W-MSA/SW-MSA
- attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
-
- x = shifted_x
- x = x.view(B, H * W, C)
-
- # FFN
- x = shortcut + self.drop_path(x)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- x = x.transpose(1, 2).reshape(B, C, H, W)
-
- return x
-
-
-if __name__ == "__main__":
- os.environ['CUDA_VISIBLE_DEVICES']='1'
- cb_block = CBlock_ln(dim=16)
- x = torch.Tensor(1, 16, 400, 600)
- swin = SwinTransformerBlock(dim=16, num_heads=4)
- x = cb_block(x)
- print(x.shape)
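`window_partition` and `window_reverse` above are exact inverses whenever `H` and `W` are divisible by the window size. A standalone round-trip check (the helpers are restated so the snippet runs on its own):

```python
import torch


def window_partition(x, window_size):
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)


def window_reverse(windows, window_size, H, W):
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)


x = torch.randn(2, 16, 24, 8)                      # B, H, W, C with H and W divisible by 8
w = window_partition(x, 8)                         # -> (12, 8, 8, 8): 2 * (16/8) * (24/8) windows
assert torch.equal(window_reverse(w, 8, 16, 24), x)
print(w.shape)
```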
diff --git a/spaces/Andy1621/uniformer_video_demo/kinetics_class_index.py b/spaces/Andy1621/uniformer_video_demo/kinetics_class_index.py
deleted file mode 100644
index 597e23e72c690f2dce0525b24bdcc2a992c4d594..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_video_demo/kinetics_class_index.py
+++ /dev/null
@@ -1,402 +0,0 @@
-kinetics_classnames = {
- "0": "riding a bike",
- "1": "marching",
- "2": "dodgeball",
- "3": "playing cymbals",
- "4": "checking tires",
- "5": "roller skating",
- "6": "tasting beer",
- "7": "clapping",
- "8": "drawing",
- "9": "juggling fire",
- "10": "bobsledding",
- "11": "petting animal (not cat)",
- "12": "spray painting",
- "13": "training dog",
- "14": "eating watermelon",
- "15": "building cabinet",
- "16": "applauding",
- "17": "playing harp",
- "18": "balloon blowing",
- "19": "sled dog racing",
- "20": "wrestling",
- "21": "pole vault",
- "22": "hurling (sport)",
- "23": "riding scooter",
- "24": "shearing sheep",
- "25": "sweeping floor",
- "26": "eating carrots",
- "27": "skateboarding",
- "28": "dunking basketball",
- "29": "disc golfing",
- "30": "eating spaghetti",
- "31": "playing flute",
- "32": "riding mechanical bull",
- "33": "making sushi",
- "34": "trapezing",
- "35": "picking fruit",
- "36": "stretching leg",
- "37": "playing ukulele",
- "38": "tying tie",
- "39": "skydiving",
- "40": "playing cello",
- "41": "jumping into pool",
- "42": "shooting goal (soccer)",
- "43": "trimming trees",
- "44": "bookbinding",
- "45": "ski jumping",
- "46": "walking the dog",
- "47": "riding unicycle",
- "48": "shaving head",
- "49": "hopscotch",
- "50": "playing piano",
- "51": "parasailing",
- "52": "bartending",
- "53": "kicking field goal",
- "54": "finger snapping",
- "55": "dining",
- "56": "yawning",
- "57": "peeling potatoes",
- "58": "canoeing or kayaking",
- "59": "front raises",
- "60": "laughing",
- "61": "dancing macarena",
- "62": "digging",
- "63": "reading newspaper",
- "64": "hitting baseball",
- "65": "clay pottery making",
- "66": "exercising with an exercise ball",
- "67": "playing saxophone",
- "68": "shooting basketball",
- "69": "washing hair",
- "70": "lunge",
- "71": "brushing hair",
- "72": "curling hair",
- "73": "kitesurfing",
- "74": "tapping guitar",
- "75": "bending back",
- "76": "skipping rope",
- "77": "situp",
- "78": "folding paper",
- "79": "cracking neck",
- "80": "assembling computer",
- "81": "cleaning gutters",
- "82": "blowing out candles",
- "83": "shaking hands",
- "84": "dancing gangnam style",
- "85": "windsurfing",
- "86": "tap dancing",
- "87": "skiing (not slalom or crosscountry)",
- "88": "bandaging",
- "89": "push up",
- "90": "doing nails",
- "91": "punching person (boxing)",
- "92": "bouncing on trampoline",
- "93": "scrambling eggs",
- "94": "singing",
- "95": "cleaning floor",
- "96": "krumping",
- "97": "drumming fingers",
- "98": "snowmobiling",
- "99": "gymnastics tumbling",
- "100": "headbanging",
- "101": "catching or throwing frisbee",
- "102": "riding elephant",
- "103": "bee keeping",
- "104": "feeding birds",
- "105": "snatch weight lifting",
- "106": "mowing lawn",
- "107": "fixing hair",
- "108": "playing trumpet",
- "109": "flying kite",
- "110": "crossing river",
- "111": "swinging legs",
- "112": "sanding floor",
- "113": "belly dancing",
- "114": "sneezing",
- "115": "clean and jerk",
- "116": "side kick",
- "117": "filling eyebrows",
- "118": "shuffling cards",
- "119": "recording music",
- "120": "cartwheeling",
- "121": "feeding fish",
- "122": "folding clothes",
- "123": "water skiing",
- "124": "tobogganing",
- "125": "blowing leaves",
- "126": "smoking",
- "127": "unboxing",
- "128": "tai chi",
- "129": "waxing legs",
- "130": "riding camel",
- "131": "slapping",
- "132": "tossing salad",
- "133": "capoeira",
- "134": "playing cards",
- "135": "playing organ",
- "136": "playing violin",
- "137": "playing drums",
- "138": "tapping pen",
- "139": "vault",
- "140": "shoveling snow",
- "141": "playing tennis",
- "142": "getting a tattoo",
- "143": "making a sandwich",
- "144": "making tea",
- "145": "grinding meat",
- "146": "squat",
- "147": "eating doughnuts",
- "148": "ice fishing",
- "149": "snowkiting",
- "150": "kicking soccer ball",
- "151": "playing controller",
- "152": "giving or receiving award",
- "153": "welding",
- "154": "throwing discus",
- "155": "throwing axe",
- "156": "ripping paper",
- "157": "swimming butterfly stroke",
- "158": "air drumming",
- "159": "blowing nose",
- "160": "hockey stop",
- "161": "taking a shower",
- "162": "bench pressing",
- "163": "planting trees",
- "164": "pumping fist",
- "165": "climbing tree",
- "166": "tickling",
- "167": "high kick",
- "168": "waiting in line",
- "169": "slacklining",
- "170": "tango dancing",
- "171": "hurdling",
- "172": "carrying baby",
- "173": "celebrating",
- "174": "sharpening knives",
- "175": "passing American football (in game)",
- "176": "headbutting",
- "177": "playing recorder",
- "178": "brush painting",
- "179": "garbage collecting",
- "180": "robot dancing",
- "181": "shredding paper",
- "182": "pumping gas",
- "183": "rock climbing",
- "184": "hula hooping",
- "185": "braiding hair",
- "186": "opening present",
- "187": "texting",
- "188": "decorating the christmas tree",
- "189": "answering questions",
- "190": "playing keyboard",
- "191": "writing",
- "192": "bungee jumping",
- "193": "sniffing",
- "194": "eating burger",
- "195": "playing accordion",
- "196": "making pizza",
- "197": "playing volleyball",
- "198": "tasting food",
- "199": "pushing cart",
- "200": "spinning poi",
- "201": "cleaning windows",
- "202": "arm wrestling",
- "203": "changing oil",
- "204": "swimming breast stroke",
- "205": "tossing coin",
- "206": "deadlifting",
- "207": "hoverboarding",
- "208": "cutting watermelon",
- "209": "cheerleading",
- "210": "snorkeling",
- "211": "washing hands",
- "212": "eating cake",
- "213": "pull ups",
- "214": "surfing water",
- "215": "eating hotdog",
- "216": "holding snake",
- "217": "playing harmonica",
- "218": "ironing",
- "219": "cutting nails",
- "220": "golf chipping",
- "221": "shot put",
- "222": "hugging",
- "223": "playing clarinet",
- "224": "faceplanting",
- "225": "trimming or shaving beard",
- "226": "drinking shots",
- "227": "riding mountain bike",
- "228": "tying bow tie",
- "229": "swinging on something",
- "230": "skiing crosscountry",
- "231": "unloading truck",
- "232": "cleaning pool",
- "233": "jogging",
- "234": "ice climbing",
- "235": "mopping floor",
- "236": "making bed",
- "237": "diving cliff",
- "238": "washing dishes",
- "239": "grooming dog",
- "240": "weaving basket",
- "241": "frying vegetables",
- "242": "stomping grapes",
- "243": "moving furniture",
- "244": "cooking sausages",
- "245": "doing laundry",
- "246": "dying hair",
- "247": "knitting",
- "248": "reading book",
- "249": "baby waking up",
- "250": "punching bag",
- "251": "surfing crowd",
- "252": "cooking chicken",
- "253": "pushing car",
- "254": "springboard diving",
- "255": "swing dancing",
- "256": "massaging legs",
- "257": "beatboxing",
- "258": "breading or breadcrumbing",
- "259": "somersaulting",
- "260": "brushing teeth",
- "261": "stretching arm",
- "262": "juggling balls",
- "263": "massaging person's head",
- "264": "eating ice cream",
- "265": "extinguishing fire",
- "266": "hammer throw",
- "267": "whistling",
- "268": "crawling baby",
- "269": "using remote controller (not gaming)",
- "270": "playing cricket",
- "271": "opening bottle",
- "272": "playing xylophone",
- "273": "motorcycling",
- "274": "driving car",
- "275": "exercising arm",
- "276": "passing American football (not in game)",
- "277": "playing kickball",
- "278": "sticking tongue out",
- "279": "flipping pancake",
- "280": "catching fish",
- "281": "eating chips",
- "282": "shaking head",
- "283": "sword fighting",
- "284": "playing poker",
- "285": "cooking on campfire",
- "286": "doing aerobics",
- "287": "paragliding",
- "288": "using segway",
- "289": "folding napkins",
- "290": "playing bagpipes",
- "291": "gargling",
- "292": "skiing slalom",
- "293": "strumming guitar",
- "294": "javelin throw",
- "295": "waxing back",
- "296": "riding or walking with horse",
- "297": "plastering",
- "298": "long jump",
- "299": "parkour",
- "300": "wrapping present",
- "301": "egg hunting",
- "302": "archery",
- "303": "cleaning toilet",
- "304": "swimming backstroke",
- "305": "snowboarding",
- "306": "catching or throwing baseball",
- "307": "massaging back",
- "308": "blowing glass",
- "309": "playing guitar",
- "310": "playing chess",
- "311": "golf driving",
- "312": "presenting weather forecast",
- "313": "rock scissors paper",
- "314": "high jump",
- "315": "baking cookies",
- "316": "using computer",
- "317": "washing feet",
- "318": "arranging flowers",
- "319": "playing bass guitar",
- "320": "spraying",
- "321": "cutting pineapple",
- "322": "waxing chest",
- "323": "auctioning",
- "324": "jetskiing",
- "325": "drinking",
- "326": "busking",
- "327": "playing monopoly",
- "328": "salsa dancing",
- "329": "waxing eyebrows",
- "330": "watering plants",
- "331": "zumba",
- "332": "chopping wood",
- "333": "pushing wheelchair",
- "334": "carving pumpkin",
- "335": "building shed",
- "336": "making jewelry",
- "337": "catching or throwing softball",
- "338": "bending metal",
- "339": "ice skating",
- "340": "dancing charleston",
- "341": "abseiling",
- "342": "climbing a rope",
- "343": "crying",
- "344": "cleaning shoes",
- "345": "dancing ballet",
- "346": "driving tractor",
- "347": "triple jump",
- "348": "throwing ball",
- "349": "getting a haircut",
- "350": "running on treadmill",
- "351": "climbing ladder",
- "352": "blasting sand",
- "353": "playing trombone",
- "354": "drop kicking",
- "355": "country line dancing",
- "356": "changing wheel",
- "357": "feeding goats",
- "358": "tying knot (not on a tie)",
- "359": "setting table",
- "360": "shaving legs",
- "361": "kissing",
- "362": "riding mule",
- "363": "counting money",
- "364": "laying bricks",
- "365": "barbequing",
- "366": "news anchoring",
- "367": "smoking hookah",
- "368": "cooking egg",
- "369": "peeling apples",
- "370": "yoga",
- "371": "sharpening pencil",
- "372": "dribbling basketball",
- "373": "petting cat",
- "374": "playing ice hockey",
- "375": "milking cow",
- "376": "shining shoes",
- "377": "juggling soccer ball",
- "378": "scuba diving",
- "379": "playing squash or racquetball",
- "380": "drinking beer",
- "381": "sign language interpreting",
- "382": "playing basketball",
- "383": "breakdancing",
- "384": "testifying",
- "385": "making snowman",
- "386": "golf putting",
- "387": "playing didgeridoo",
- "388": "biking through snow",
- "389": "sailing",
- "390": "jumpstyle dancing",
- "391": "water sliding",
- "392": "grooming horse",
- "393": "massaging feet",
- "394": "playing paintball",
- "395": "making a cake",
- "396": "bowling",
- "397": "contact juggling",
- "398": "applying cream",
- "399": "playing badminton"
-}
\ No newline at end of file
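A mapping like this is typically used to turn a video classifier's top-k indices into readable labels. A short sketch, assuming the `kinetics_classnames` dict above is in scope and the logits come from some Kinetics-400 model:

```python
import torch

# Stand-in for the output of a Kinetics-400 video classifier (batch of one clip).
logits = torch.randn(1, 400)
top5 = logits.softmax(dim=-1).topk(5, dim=-1)
for prob, idx in zip(top5.values[0], top5.indices[0]):
    print(f"{kinetics_classnames[str(int(idx))]}: {float(prob):.3f}")
```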
diff --git a/spaces/AquaSuisei/ChatGPTXE/modules/overwrites.py b/spaces/AquaSuisei/ChatGPTXE/modules/overwrites.py
deleted file mode 100644
index bfcd4d01b7d7bec1184a8d09113933bca860530b..0000000000000000000000000000000000000000
--- a/spaces/AquaSuisei/ChatGPTXE/modules/overwrites.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from __future__ import annotations
-import logging
-
-from llama_index import Prompt
-from typing import List, Tuple
-import mdtex2html
-
-from modules.presets import *
-from modules.llama_func import *
-
-
-def compact_text_chunks(self, prompt: Prompt, text_chunks: List[str]) -> List[str]:
- logging.debug("Compacting text chunks...🚀🚀🚀")
- combined_str = [c.strip() for c in text_chunks if c.strip()]
- combined_str = [f"[{index+1}] {c}" for index, c in enumerate(combined_str)]
- combined_str = "\n\n".join(combined_str)
- # resplit based on self.max_chunk_overlap
- text_splitter = self.get_text_splitter_given_prompt(prompt, 1, padding=1)
- return text_splitter.split_text(combined_str)
-
-
-def postprocess(
- self, y: List[Tuple[str | None, str | None]]
-) -> List[Tuple[str | None, str | None]]:
- """
- Parameters:
- y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
- Returns:
- List of tuples representing the message and response. Each message and response will be a string of HTML.
- """
- if y is None or y == []:
- return []
- user, bot = y[-1]
- if not detect_converted_mark(user):
- user = convert_asis(user)
- if not detect_converted_mark(bot):
- bot = convert_mdtext(bot)
- y[-1] = (user, bot)
- return y
-
-with open("./assets/custom.js", "r", encoding="utf-8") as f, open("./assets/Kelpy-Codos.js", "r", encoding="utf-8") as f2:
- customJS = f.read()
- kelpyCodos = f2.read()
-
-def reload_javascript():
- print("Reloading javascript...")
- js = f'<script>{customJS}</script><script>{kelpyCodos}</script>'
- def template_response(*args, **kwargs):
- res = GradioTemplateResponseOriginal(*args, **kwargs)
- res.body = res.body.replace(b'